diff --git a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872044.json b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872044.json
new file mode 100644
index 000000000000..b2cafff31230
--- /dev/null
+++ b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872044.json
@@ -0,0 +1,11 @@
+{
+  "services": [
+    {
+      "serviceName": "S3",
+      "type": "minor",
+      "changeLogMessages": [
+        "Created new UploadDirectoryWithResponseAsync method on the Amazon.S3.Transfer.TransferUtility class."
+      ]
+    }
+  ]
+}
\ No newline at end of file
diff --git a/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872124.json b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872124.json
new file mode 100644
index 000000000000..d5508da3272f
--- /dev/null
+++ b/generator/.DevConfigs/9d07dc1e-d82d-4f94-8700-c7b57f872124.json
@@ -0,0 +1,11 @@
+{
+  "services": [
+    {
+      "serviceName": "S3",
+      "type": "minor",
+      "changeLogMessages": [
+        "Added UploadDirectoryInitiatedEvent, UploadDirectoryCompletedEvent, and UploadDirectoryFailedEvent for Amazon.S3.Transfer.TransferUtility.UploadDirectory."
+      ]
+    }
+  ]
+}
\ No newline at end of file
diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs
index 148e34798d47..b6c884a8361f 100644
--- a/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs
@@ -45,7 +45,43 @@ internal partial class UploadDirectoryCommand : BaseCommand
             ExecuteAsync(CancellationToken cancellationToken)
         {
-            // Step 1: Setup paths and discover files
-            string prefix = GetKeyPrefix();
-            string basePath = new DirectoryInfo(this._request.Directory).FullName;
+            try
+            {
+                // Step 1: Setup paths and discover files
+                string prefix = GetKeyPrefix();
+                string basePath = new DirectoryInfo(this._request.Directory).FullName;
 
-            _logger.DebugFormat("UploadDirectoryCommand.ExecuteAsync: Starting - BasePath={0}, Prefix={1}, UploadFilesConcurrently={2}, ConcurrentServiceRequests={3}",
-                basePath, prefix, UploadFilesConcurrently, this._config.ConcurrentServiceRequests);
+                _logger.DebugFormat("UploadDirectoryCommand.ExecuteAsync: Starting - BasePath={0}, Prefix={1}, UploadFilesConcurrently={2}, ConcurrentServiceRequests={3}",
+                    basePath, prefix, UploadFilesConcurrently, this._config.ConcurrentServiceRequests);
 
-            // Step 2: Discover files to upload
-            string[] filePaths = await DiscoverFilesAsync(basePath, cancellationToken)
-                .ConfigureAwait(false);
+                // Step 2: Discover files to upload
+                string[] filePaths = await DiscoverFilesAsync(basePath, cancellationToken)
+                    .ConfigureAwait(false);
 
-            this._totalNumberOfFiles = filePaths.Length;
-            _logger.DebugFormat("UploadDirectoryCommand.ExecuteAsync: Discovered {0} file(s) to upload. TotalBytes={1}",
-                _totalNumberOfFiles, _totalBytes);
+                this._totalNumberOfFiles = filePaths.Length;
+                _logger.DebugFormat("UploadDirectoryCommand.ExecuteAsync: Discovered {0} file(s) to upload. TotalBytes={1}",
+                    _totalNumberOfFiles, _totalBytes);
 
-            // Step 3: Setup resources and execute uploads
-            using (var resources = CreateUploadResources(cancellationToken))
-            {
-                await ExecuteParallelUploadsAsync(
-                    filePaths,
-                    basePath,
-                    prefix,
-                    resources,
-                    cancellationToken)
-                    .ConfigureAwait(false);
-            }
+                FireTransferInitiatedEvent();
 
-            // Step 4: Build and return response
-            _logger.DebugFormat("UploadDirectoryCommand.ExecuteAsync: Completed - FilesSuccessfullyUploaded={0}, FilesFailed={1}",
-                _numberOfFilesSuccessfullyUploaded, _errors.Count);
-            return BuildResponse();
+                // Step 3: Setup resources and execute uploads
+                using (var resources = CreateUploadResources(cancellationToken))
+                {
+                    await ExecuteParallelUploadsAsync(
+                        filePaths,
+                        basePath,
+                        prefix,
+                        resources,
+                        cancellationToken)
+                        .ConfigureAwait(false);
+                }
+
+                // Step 4: Build and return response
+                _logger.DebugFormat("UploadDirectoryCommand.ExecuteAsync: Completed - FilesSuccessfullyUploaded={0}, FilesFailed={1}",
+                    _numberOfFilesSuccessfullyUploaded, _errors.Count);
+
+                var response = BuildResponse();
+                FireTransferCompletedEvent(response);
+                return response;
+            }
+            catch
+            {
+                FireTransferFailedEvent();
+                throw;
+            }
         }
 
         /// <summary>
diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs
index 004e83d1f81d..802d544ef86c 100644
--- a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryRequest.cs
@@ -55,6 +55,33 @@ public FailurePolicy FailurePolicy
             set { this.failurePolicy = value; }
         }
 
+        /// <summary>
+        /// Occurs when the upload directory operation is initiated.
+        /// </summary>
+        /// <remarks>
+        /// This event is raised before any files are uploaded, providing information about
+        /// the total number of files and bytes that will be uploaded.
+        /// </remarks>
+        public event EventHandler<UploadDirectoryInitiatedEventArgs> UploadDirectoryInitiatedEvent;
+
+        /// <summary>
+        /// Occurs when the upload directory operation completes successfully.
+        /// </summary>
+        /// <remarks>
+        /// This event is raised after all files have been processed (successfully or with failures),
+        /// providing the final response and statistics.
+        /// </remarks>
+        public event EventHandler<UploadDirectoryCompletedEventArgs> UploadDirectoryCompletedEvent;
+
+        /// <summary>
+        /// Occurs when the upload directory operation fails.
+        /// </summary>
+        /// <remarks>
+        /// This event is raised when the entire operation fails (not individual file failures).
+        /// Individual file failures are reported through <see cref="ObjectUploadFailedEvent"/>.
+        /// </remarks>
+        public event EventHandler<UploadDirectoryFailedEventArgs> UploadDirectoryFailedEvent;
+
         /// <summary>
         /// Occurs when an individual object fails to upload during an UploadDirectory operation.
         /// </summary>
@@ -72,6 +99,33 @@ public FailurePolicy FailurePolicy
         /// </remarks>
         public event EventHandler ObjectUploadFailedEvent;
 
+        /// <summary>
+        /// Internal helper used by the transfer implementation to raise the <see cref="UploadDirectoryInitiatedEvent"/>.
+        /// </summary>
+        /// <param name="args">The event args.</param>
+        internal void OnRaiseUploadDirectoryInitiatedEvent(UploadDirectoryInitiatedEventArgs args)
+        {
+            UploadDirectoryInitiatedEvent?.Invoke(this, args);
+        }
+
+        /// <summary>
+        /// Internal helper used by the transfer implementation to raise the <see cref="UploadDirectoryCompletedEvent"/>.
+        /// </summary>
+        /// <param name="args">The event args.</param>
+        internal void OnRaiseUploadDirectoryCompletedEvent(UploadDirectoryCompletedEventArgs args)
+        {
+            UploadDirectoryCompletedEvent?.Invoke(this, args);
+        }
+
+        /// <summary>
+        /// Internal helper used by the transfer implementation to raise the <see cref="UploadDirectoryFailedEvent"/>.
+        /// </summary>
+        /// <param name="args">The event args.</param>
+        internal void OnRaiseUploadDirectoryFailedEvent(UploadDirectoryFailedEventArgs args)
+        {
+            UploadDirectoryFailedEvent?.Invoke(this, args);
+        }
+
         /// <summary>
         /// Internal helper used by the transfer implementation to raise the <see cref="ObjectUploadFailedEvent"/>.
         /// </summary>
         /// <param name="args">The event args.</param>
@@ -421,6 +475,157 @@ public UploadDirectoryFileRequestArgs(TransferUtilityUploadRequest request)
         public TransferUtilityUploadRequest UploadRequest { get; set; }
     }
 
+    /// <summary>
+    /// Provides data for <see cref="TransferUtilityUploadDirectoryRequest.UploadDirectoryInitiatedEvent"/>.
+    /// </summary>
+    public class UploadDirectoryInitiatedEventArgs : EventArgs
+    {
+        /// <summary>
+        /// Initializes a new instance of the <see cref="UploadDirectoryInitiatedEventArgs"/> class.
+        /// </summary>
+        /// <param name="request">The upload directory request.</param>
+        /// <param name="totalFiles">The total number of files to upload.</param>
+        /// <param name="totalBytes">The total number of bytes to upload.</param>
+        internal UploadDirectoryInitiatedEventArgs(
+            TransferUtilityUploadDirectoryRequest request,
+            long totalFiles,
+            long totalBytes)
+        {
+            Request = request;
+            TotalFiles = totalFiles;
+            TotalBytes = totalBytes;
+        }
+
+        /// <summary>
+        /// Gets the upload directory request.
+        /// </summary>
+        public TransferUtilityUploadDirectoryRequest Request { get; private set; }
+
+        /// <summary>
+        /// Gets the total number of files to upload.
+        /// </summary>
+        public long TotalFiles { get; private set; }
+
+        /// <summary>
+        /// Gets the total number of bytes to upload.
+        /// </summary>
+        public long TotalBytes { get; private set; }
+    }
+
+    /// <summary>
+    /// Provides data for <see cref="TransferUtilityUploadDirectoryRequest.UploadDirectoryCompletedEvent"/>.
+    /// </summary>
+    public class UploadDirectoryCompletedEventArgs : EventArgs
+    {
+        /// <summary>
+        /// Initializes a new instance of the <see cref="UploadDirectoryCompletedEventArgs"/> class.
+        /// </summary>
+        /// <param name="request">The upload directory request.</param>
+        /// <param name="response">The upload directory response.</param>
+        /// <param name="transferredFiles">The number of files successfully uploaded.</param>
+        /// <param name="totalFiles">The total number of files attempted.</param>
+        /// <param name="transferredBytes">The number of bytes transferred.</param>
+        /// <param name="totalBytes">The total number of bytes.</param>
+        internal UploadDirectoryCompletedEventArgs(
+            TransferUtilityUploadDirectoryRequest request,
+            TransferUtilityUploadDirectoryResponse response,
+            long transferredFiles,
+            long totalFiles,
+            long transferredBytes,
+            long totalBytes)
+        {
+            Request = request;
+            Response = response;
+            TransferredFiles = transferredFiles;
+            TotalFiles = totalFiles;
+            TransferredBytes = transferredBytes;
+            TotalBytes = totalBytes;
+        }
+
+        /// <summary>
+        /// Gets the upload directory request.
+        /// </summary>
+        public TransferUtilityUploadDirectoryRequest Request { get; private set; }
+
+        /// <summary>
+        /// Gets the upload directory response.
+        /// </summary>
+        public TransferUtilityUploadDirectoryResponse Response { get; private set; }
+
+        /// <summary>
+        /// Gets the number of files successfully uploaded.
+        /// </summary>
+        public long TransferredFiles { get; private set; }
+
+        /// <summary>
+        /// Gets the total number of files attempted.
+        /// </summary>
+        public long TotalFiles { get; private set; }
+
+        /// <summary>
+        /// Gets the number of bytes transferred.
+        /// </summary>
+        public long TransferredBytes { get; private set; }
+
+        /// <summary>
+        /// Gets the total number of bytes.
+        /// </summary>
+        public long TotalBytes { get; private set; }
+    }
+
+    /// <summary>
+    /// Provides data for <see cref="TransferUtilityUploadDirectoryRequest.UploadDirectoryFailedEvent"/>.
+    /// </summary>
+    public class UploadDirectoryFailedEventArgs : EventArgs
+    {
+        /// <summary>
+        /// Initializes a new instance of the <see cref="UploadDirectoryFailedEventArgs"/> class.
+        /// </summary>
+        /// <param name="request">The upload directory request.</param>
+        /// <param name="transferredFiles">The number of files successfully uploaded before failure.</param>
+        /// <param name="totalFiles">The total number of files attempted.</param>
+        /// <param name="transferredBytes">The number of bytes transferred before failure.</param>
+        /// <param name="totalBytes">The total number of bytes.</param>
+        internal UploadDirectoryFailedEventArgs(
+            TransferUtilityUploadDirectoryRequest request,
+            long transferredFiles,
+            long totalFiles,
+            long transferredBytes,
+            long totalBytes)
+        {
+            Request = request;
+            TransferredFiles = transferredFiles;
+            TotalFiles = totalFiles;
+            TransferredBytes = transferredBytes;
+            TotalBytes = totalBytes;
+        }
+
+        /// <summary>
+        /// Gets the upload directory request.
+        /// </summary>
+        public TransferUtilityUploadDirectoryRequest Request { get; private set; }
+
+        /// <summary>
+        /// Gets the number of files successfully uploaded before failure.
+        /// </summary>
+        public long TransferredFiles { get; private set; }
+
+        /// <summary>
+        /// Gets the total number of files attempted.
+        /// </summary>
+        public long TotalFiles { get; private set; }
+
+        /// <summary>
+        /// Gets the number of bytes transferred before failure.
+        /// </summary>
+        public long TransferredBytes { get; private set; }
+
+        /// <summary>
+        /// Gets the total number of bytes.
+        /// </summary>
+        public long TotalBytes { get; private set; }
+    }
+
     /// <summary>
     /// Provides data for <see cref="TransferUtilityUploadDirectoryRequest.ObjectUploadFailedEvent"/>
     /// which is raised when an individual object fails to upload during an
diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.async.cs
index e11731050c43..a25a43c8b5a5 100644
--- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.async.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.async.cs
@@ -145,6 +145,94 @@ public partial interface ITransferUtility
         /// <returns>The task object representing the asynchronous operation.</returns>
         Task UploadDirectoryAsync(TransferUtilityUploadDirectoryRequest request, CancellationToken cancellationToken = default(CancellationToken));
 
+        /// <summary>
+        /// Uploads files from a specified directory and returns response metadata.
+        /// The object key is derived from the file names inside the directory.
+        /// For large uploads, the file will be divided and uploaded in parts using
+        /// Amazon S3's multipart API. The parts will be reassembled as one object in
+        /// Amazon S3.
+        /// </summary>
+        /// <remarks>
+        /// <para>
+        /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request.
+        /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload.
+        /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able
+        /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts,
+        /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads.
+        /// </para>
+        /// </remarks>
+        /// <param name="directory">
+        /// The source directory, that is, the directory containing the files to upload.
+        /// </param>
+        /// <param name="bucketName">
+        /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to.
+        /// </param>
+        /// <param name="cancellationToken">
+        /// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
+        /// </param>
+        /// <returns>The task object representing the asynchronous operation with upload response metadata.</returns>
+        Task<TransferUtilityUploadDirectoryResponse> UploadDirectoryWithResponseAsync(string directory, string bucketName, CancellationToken cancellationToken = default(CancellationToken));
+
+        /// <summary>
+        /// Uploads files from a specified directory and returns response metadata.
+        /// The object key is derived from the file names inside the directory.
+        /// For large uploads, the file will be divided and uploaded in parts using
+        /// Amazon S3's multipart API. The parts will be reassembled as one object in
+        /// Amazon S3.
+        /// </summary>
+        /// <remarks>
+        /// <para>
+        /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request.
+        /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload.
+        /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able
+        /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts,
+        /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads.
+        /// </para>
+        /// </remarks>
+        /// <param name="directory">
+        /// The source directory, that is, the directory containing the files to upload.
+        /// </param>
+        /// <param name="bucketName">
+        /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to.
+        /// </param>
+        /// <param name="searchPattern">
+        /// A pattern used to identify the files from the source directory to upload.
+        /// </param>
+        /// <param name="searchOption">
+        /// A search option that specifies whether to recursively search for files to upload
+        /// in subdirectories.
+        /// </param>
+        /// <param name="cancellationToken">
+        /// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
+        /// </param>
+        /// <returns>The task object representing the asynchronous operation with upload response metadata.</returns>
+        Task<TransferUtilityUploadDirectoryResponse> UploadDirectoryWithResponseAsync(string directory, string bucketName, string searchPattern, SearchOption searchOption, CancellationToken cancellationToken = default(CancellationToken));
+
+        /// <summary>
+        /// Uploads files from a specified directory and returns response metadata.
+        /// The object key is derived from the file names inside the directory.
+        /// For large uploads, the file will be divided and uploaded in parts using
+        /// Amazon S3's multipart API. The parts will be reassembled as one object in
+        /// Amazon S3.
+        /// </summary>
+        /// <remarks>
+        /// <para>
+        /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request.
+        /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload.
+        /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able
+        /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts,
+        /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads.
+        /// </para>
+        /// </remarks>
+        /// <param name="request">
+        /// The request that contains all the parameters required to upload a directory.
+        /// </param>
+        /// <param name="cancellationToken">
+        /// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
+        /// </param>
+        /// <returns>The task object representing the asynchronous operation with upload response metadata.</returns>
+        Task<TransferUtilityUploadDirectoryResponse> UploadDirectoryWithResponseAsync(TransferUtilityUploadDirectoryRequest request, CancellationToken cancellationToken = default(CancellationToken));
+
         #endregion
 
         #region DownloadDirectory
diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs
index 979fc54daf9f..b984fa70b800 100644
--- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/ITransferUtility.sync.cs
@@ -110,6 +110,85 @@ public partial interface ITransferUtility
         /// The request that contains all the parameters required to upload a directory.
         /// </param>
         void UploadDirectory(TransferUtilityUploadDirectoryRequest request);
+
+        /// <summary>
+        /// Uploads files from a specified directory and returns response metadata.
+        /// The object key is derived from the file names inside the directory.
+        /// For large uploads, the file will be divided and uploaded in parts using
+        /// Amazon S3's multipart API. The parts will be reassembled as one object in
+        /// Amazon S3.
+        /// </summary>
+        /// <remarks>
+        /// <para>
+        /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request.
+        /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload.
+        /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able
+        /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts,
+        /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads.
+        /// </para>
+        /// </remarks>
+        /// <param name="directory">
+        /// The source directory, that is, the directory containing the files to upload.
+        /// </param>
+        /// <param name="bucketName">
+        /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to.
+        /// </param>
+        /// <returns>Response metadata including the number of objects uploaded and any errors encountered.</returns>
+        TransferUtilityUploadDirectoryResponse UploadDirectoryWithResponse(string directory, string bucketName);
+
+        /// <summary>
+        /// Uploads files from a specified directory and returns response metadata.
+        /// The object key is derived from the file names inside the directory.
+        /// For large uploads, the file will be divided and uploaded in parts using
+        /// Amazon S3's multipart API. The parts will be reassembled as one object in
+        /// Amazon S3.
+        /// </summary>
+        /// <remarks>
+        /// <para>
+        /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request.
+        /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload.
+        /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able
+        /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts,
+        /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads.
+        /// </para>
+        /// </remarks>
+        /// <param name="directory">
+        /// The source directory, that is, the directory containing the files to upload.
+        /// </param>
+        /// <param name="bucketName">
+        /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to.
+        /// </param>
+        /// <param name="searchPattern">
+        /// A pattern used to identify the files from the source directory to upload.
+        /// </param>
+        /// <param name="searchOption">
+        /// A search option that specifies whether to recursively search for files to upload
+        /// in subdirectories.
+        /// </param>
+        /// <returns>Response metadata including the number of objects uploaded and any errors encountered.</returns>
+        TransferUtilityUploadDirectoryResponse UploadDirectoryWithResponse(string directory, string bucketName, string searchPattern, SearchOption searchOption);
+
+        /// <summary>
+        /// Uploads files from a specified directory and returns response metadata.
+        /// The object key is derived from the file names inside the directory.
+        /// For large uploads, the file will be divided and uploaded in parts using
+        /// Amazon S3's multipart API. The parts will be reassembled as one object in
+        /// Amazon S3.
+        /// </summary>
+        /// <remarks>
+        /// <para>
+        /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request.
+        /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload.
+        /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able
+        /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts,
+        /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads.
+        /// </para>
+        /// </remarks>
+        /// <param name="request">
+        /// The request that contains all the parameters required to upload a directory.
+        /// </param>
+        /// <returns>Response metadata including the number of objects uploaded and any errors encountered.</returns>
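+        /// <example>
+        /// A minimal usage sketch (the transfer utility instance, bucket name, and directory
+        /// path are placeholders):
+        /// <code>
+        /// var response = transferUtility.UploadDirectoryWithResponse(new TransferUtilityUploadDirectoryRequest
+        /// {
+        ///     BucketName = "amzn-s3-demo-bucket",
+        ///     Directory = @"C:\data\to-upload",
+        ///     SearchPattern = "*",
+        ///     SearchOption = SearchOption.AllDirectories
+        /// });
+        /// Console.WriteLine($"{response.ObjectsUploaded} uploaded, {response.ObjectsFailed} failed");
+        /// </code>
+        /// </example>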
+        TransferUtilityUploadDirectoryResponse UploadDirectoryWithResponse(TransferUtilityUploadDirectoryRequest request);
         #endregion
 
         #region Upload
diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.async.cs
index 81d2d4b43351..6cda6c5c6194 100644
--- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.async.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.async.cs
@@ -54,103 +54,21 @@ public partial class TransferUtility : ITransferUtility
     {
         #region UploadDirectory
 
-        /// <summary>
-        /// Uploads files from a specified directory.
-        /// The object key is derived from the file names
-        /// inside the directory.
-        /// For large uploads, the file will be divided and uploaded in parts using
-        /// Amazon S3's multipart API. The parts will be reassembled as one object in
-        /// Amazon S3.
-        /// </summary>
-        /// <remarks>
-        /// <para>
-        /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request.
-        /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload.
-        /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able
-        /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts,
-        /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads.
-        /// </para>
-        /// </remarks>
-        /// <param name="directory">
-        /// The source directory, that is, the directory containing the files to upload.
-        /// </param>
-        /// <param name="bucketName">
-        /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to.
-        /// </param>
-        /// <param name="cancellationToken">
-        /// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
-        /// </param>
-        /// <returns>The task object representing the asynchronous operation.</returns>
+        /// <inheritdoc/>
         public async Task UploadDirectoryAsync(string directory, string bucketName, CancellationToken cancellationToken = default(CancellationToken))
         {
             var request = ConstructUploadDirectoryRequest(directory, bucketName);
             await UploadDirectoryAsync(request, cancellationToken).ConfigureAwait(false);
         }
 
-        /// <summary>
-        /// Uploads files from a specified directory.
-        /// The object key is derived from the file names
-        /// inside the directory.
-        /// For large uploads, the file will be divided and uploaded in parts using
-        /// Amazon S3's multipart API. The parts will be reassembled as one object in
-        /// Amazon S3.
-        /// </summary>
-        /// <remarks>
-        /// <para>
-        /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request.
-        /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload.
-        /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able
-        /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts,
-        /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads.
-        /// </para>
-        /// </remarks>
-        /// <param name="directory">
-        /// The source directory, that is, the directory containing the files to upload.
-        /// </param>
-        /// <param name="bucketName">
-        /// The target Amazon S3 bucket, that is, the name of the bucket to upload the files to.
-        /// </param>
-        /// <param name="searchPattern">
-        /// A pattern used to identify the files from the source directory to upload.
-        /// </param>
-        /// <param name="searchOption">
-        /// A search option that specifies whether to recursively search for files to upload
-        /// in subdirectories.
-        /// </param>
-        /// <param name="cancellationToken">
-        /// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
-        /// </param>
-        /// <returns>The task object representing the asynchronous operation.</returns>
+        /// <inheritdoc/>
        public async Task UploadDirectoryAsync(string directory, string bucketName, string searchPattern, SearchOption searchOption, CancellationToken cancellationToken = default(CancellationToken))
         {
             var request = ConstructUploadDirectoryRequest(directory, bucketName, searchPattern, searchOption);
             await UploadDirectoryAsync(request, cancellationToken).ConfigureAwait(false);
         }
 
-        /// <summary>
-        /// Uploads files from a specified directory.
-        /// The object key is derived from the file names
-        /// inside the directory.
-        /// For large uploads, the file will be divided and uploaded in parts using
-        /// Amazon S3's multipart API. The parts will be reassembled as one object in
-        /// Amazon S3.
-        /// </summary>
-        /// <remarks>
-        /// <para>
-        /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request.
-        /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload.
-        /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able
-        /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts,
-        /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads.
-        /// </para>
-        /// </remarks>
-        /// <param name="request">
-        /// The request that contains all the parameters required to upload a directory.
-        /// </param>
-        /// <param name="cancellationToken">
-        /// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
-        /// </param>
-        /// <returns>The task object representing the asynchronous operation.</returns>
+        /// <inheritdoc/>
         public async Task UploadDirectoryAsync(TransferUtilityUploadDirectoryRequest request, CancellationToken cancellationToken = default(CancellationToken))
         {
             using(CreateSpan(nameof(UploadDirectoryAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT))
@@ -163,45 +81,44 @@ public partial class TransferUtility : ITransferUtility
             }
         }
 
+        /// <inheritdoc/>
+        public async Task<TransferUtilityUploadDirectoryResponse> UploadDirectoryWithResponseAsync(string directory, string bucketName, CancellationToken cancellationToken = default(CancellationToken))
+        {
+            var request = ConstructUploadDirectoryRequest(directory, bucketName);
+            return await UploadDirectoryWithResponseAsync(request, cancellationToken).ConfigureAwait(false);
+        }
+
+        /// <inheritdoc/>
+        public async Task<TransferUtilityUploadDirectoryResponse> UploadDirectoryWithResponseAsync(string directory, string bucketName, string searchPattern, SearchOption searchOption, CancellationToken cancellationToken = default(CancellationToken))
+        {
+            var request = ConstructUploadDirectoryRequest(directory, bucketName, searchPattern, searchOption);
+            return await UploadDirectoryWithResponseAsync(request, cancellationToken).ConfigureAwait(false);
+        }
+
+        /// <inheritdoc/>
+        public async Task<TransferUtilityUploadDirectoryResponse> UploadDirectoryWithResponseAsync(TransferUtilityUploadDirectoryRequest request, CancellationToken cancellationToken = default(CancellationToken))
+        {
+            using(CreateSpan(nameof(UploadDirectoryWithResponseAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT))
+            {
+                CheckForBlockedArn(request.BucketName, "UploadDirectory");
+                validate(request);
+                UploadDirectoryCommand command = new UploadDirectoryCommand(this, this._config, request);
+                command.UploadFilesConcurrently = request.UploadFilesConcurrently;
+                return await command.ExecuteAsync(cancellationToken).ConfigureAwait(false);
+            }
+        }
+
         #endregion
 
         #region DownloadDirectory
-        /// <summary>
-        /// Downloads the objects in Amazon S3 that have a key that starts with the value
-        /// specified by s3Directory.
-        /// </summary>
-        /// <param name="bucketName">
-        /// The name of the bucket containing the Amazon S3 objects to download.
-        /// </param>
-        /// <param name="s3Directory">
-        /// The directory in Amazon S3 to download.
-        /// </param>
-        /// <param name="localDirectory">
-        /// The local directory to download the objects to.
-        /// </param>
-        /// <param name="cancellationToken">
-        /// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
-        /// </param>
-        /// <returns>The task object representing the asynchronous operation.</returns>
+        /// <inheritdoc/>
        public async Task DownloadDirectoryAsync(string bucketName, string s3Directory, string localDirectory, CancellationToken cancellationToken = default(CancellationToken))
         {
             var request = ConstructDownloadDirectoryRequest(bucketName, s3Directory, localDirectory);
             await DownloadDirectoryAsync(request, cancellationToken).ConfigureAwait(false);
         }
 
-        /// <summary>
-        /// Downloads the objects in Amazon S3 that have a key that starts with the value
-        /// specified by the S3Directory
-        /// property of the passed in TransferUtilityDownloadDirectoryRequest object.
-        /// </summary>
-        /// <param name="request">
-        /// Contains all the parameters required to download objects from Amazon S3
-        /// into a local directory.
-        /// </param>
-        /// <param name="cancellationToken">
-        /// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
-        /// </param>
-        /// <returns>The task object representing the asynchronous operation.</returns>
+        /// <inheritdoc/>
        public async Task DownloadDirectoryAsync(TransferUtilityDownloadDirectoryRequest request, CancellationToken cancellationToken = default(CancellationToken))
         {
             using(CreateSpan(nameof(DownloadDirectoryAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT))
@@ -215,22 +132,7 @@ public partial class TransferUtility : ITransferUtility
         #endregion
 
         #region Download
-        /// <summary>
-        /// Downloads the content from Amazon S3 and writes it to the specified file.
-        /// </summary>
-        /// <param name="filePath">
-        /// The file path where the content from Amazon S3 will be written to.
-        /// </param>
-        /// <param name="bucketName">
-        /// The name of the bucket containing the Amazon S3 object to download.
-        /// </param>
-        /// <param name="key">
-        /// The key under which the Amazon S3 object is stored.
-        /// </param>
-        /// <param name="cancellationToken">
-        /// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
-        /// </param>
-        /// <returns>The task object representing the asynchronous operation.</returns>
+        /// <inheritdoc/>
         public async Task DownloadAsync(string filePath, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken))
         {
             var request = ConstructDownloadRequest(filePath, bucketName, key);
diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs
index 457360ccd3f7..05f5c2cac349 100644
--- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs
+++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs
@@ -70,6 +70,24 @@ public void UploadDirectory(TransferUtilityUploadDirectoryRequest request)
                 ExceptionDispatchInfo.Capture(e.InnerException).Throw();
             }
         }
+
+        /// <inheritdoc/>
+        public TransferUtilityUploadDirectoryResponse UploadDirectoryWithResponse(string directory, string bucketName)
+        {
+            return UploadDirectoryWithResponseAsync(directory, bucketName).GetAwaiter().GetResult();
+        }
+
+        /// <inheritdoc/>
+        public TransferUtilityUploadDirectoryResponse UploadDirectoryWithResponse(string directory, string bucketName, string searchPattern, SearchOption searchOption)
+        {
+            return UploadDirectoryWithResponseAsync(directory, bucketName, searchPattern, searchOption).GetAwaiter().GetResult();
+        }
+
+        /// <inheritdoc/>
+        public TransferUtilityUploadDirectoryResponse UploadDirectoryWithResponse(TransferUtilityUploadDirectoryRequest request)
+        {
+            return UploadDirectoryWithResponseAsync(request).GetAwaiter().GetResult();
+        }
         #endregion
 
         #region Upload
@@ -130,57 +148,25 @@ public void Upload(TransferUtilityUploadRequest request)
         /// <inheritdoc/>
         public TransferUtilityUploadResponse UploadWithResponse(string filePath, string bucketName)
         {
-            try
-            {
-                return UploadWithResponseAsync(filePath, bucketName).Result;
-            }
-            catch (AggregateException e)
-            {
-                ExceptionDispatchInfo.Capture(e.InnerException).Throw();
-                return null;
-            }
+            return UploadWithResponseAsync(filePath, bucketName).GetAwaiter().GetResult();
         }
 
         /// <inheritdoc/>
         public TransferUtilityUploadResponse UploadWithResponse(string filePath, string bucketName, string key)
         {
-            try
-            {
-                return UploadWithResponseAsync(filePath, bucketName, key).Result;
-            }
-            catch (AggregateException e)
-            {
-                ExceptionDispatchInfo.Capture(e.InnerException).Throw();
-                return null;
-            }
+            return UploadWithResponseAsync(filePath, bucketName, key).GetAwaiter().GetResult();
         }
 
         /// <inheritdoc/>
         public TransferUtilityUploadResponse UploadWithResponse(Stream stream, string bucketName, string key)
         {
-            try
-            {
-                return UploadWithResponseAsync(stream, bucketName, key).Result;
-            }
-            catch (AggregateException e)
-            {
-                ExceptionDispatchInfo.Capture(e.InnerException).Throw();
-                return null;
-            }
+            return UploadWithResponseAsync(stream, bucketName, key).GetAwaiter().GetResult();
         }
 
         /// <inheritdoc/>
         public TransferUtilityUploadResponse UploadWithResponse(TransferUtilityUploadRequest request)
         {
-            try
-            {
-                return UploadWithResponseAsync(request).Result;
-            }
-            catch (AggregateException e)
-            {
-                ExceptionDispatchInfo.Capture(e.InnerException).Throw();
-                return null;
-            }
+            return UploadWithResponseAsync(request).GetAwaiter().GetResult();
         }
 
         #endregion
@@ -218,29 +204,13 @@ public Stream OpenStream(TransferUtilityOpenStreamRequest request)
         /// <inheritdoc/>
         public TransferUtilityOpenStreamResponse OpenStreamWithResponse(string bucketName, string key)
         {
-            try
-            {
-                return OpenStreamWithResponseAsync(bucketName, key).Result;
-            }
-            catch (AggregateException e)
-            {
-                ExceptionDispatchInfo.Capture(e.InnerException).Throw();
-                return null;
-            }
+            return OpenStreamWithResponseAsync(bucketName, key).GetAwaiter().GetResult();
         }
 
         /// <inheritdoc/>
         public TransferUtilityOpenStreamResponse OpenStreamWithResponse(TransferUtilityOpenStreamRequest request)
         {
-            try
-            {
-                return OpenStreamWithResponseAsync(request).Result;
-            }
-            catch (AggregateException e)
-            {
-                ExceptionDispatchInfo.Capture(e.InnerException).Throw();
-                return null;
-            }
+            return OpenStreamWithResponseAsync(request).GetAwaiter().GetResult();
         }
 
         #endregion
@@ -275,29 +245,13 @@ public void Download(TransferUtilityDownloadRequest request)
         /// <inheritdoc/>
         public TransferUtilityDownloadResponse DownloadWithResponse(string filePath, string bucketName, string key)
         {
-            try
-            {
-                return DownloadWithResponseAsync(filePath, bucketName, key).Result;
-            }
-            catch (AggregateException e)
-            {
-                ExceptionDispatchInfo.Capture(e.InnerException).Throw();
-                return null;
-            }
+            return DownloadWithResponseAsync(filePath, bucketName, key).GetAwaiter().GetResult();
         }
 
         /// <inheritdoc/>
         public TransferUtilityDownloadResponse DownloadWithResponse(TransferUtilityDownloadRequest request)
         {
-            try
-            {
-                return DownloadWithResponseAsync(request).Result;
-            }
-            catch (AggregateException e)
-            {
-                ExceptionDispatchInfo.Capture(e.InnerException).Throw();
-                return null;
-            }
+            return DownloadWithResponseAsync(request).GetAwaiter().GetResult();
         }
 
         #endregion
@@ -331,29 +285,13 @@ public void DownloadDirectory(TransferUtilityDownloadDirectoryRequest request)
         /// <inheritdoc/>
         public TransferUtilityDownloadDirectoryResponse DownloadDirectoryWithResponse(string bucketName, string s3Directory, string localDirectory)
         {
-            try
-            {
-                return DownloadDirectoryWithResponseAsync(bucketName, s3Directory, localDirectory).Result;
-            }
-            catch (AggregateException e)
-            {
-                ExceptionDispatchInfo.Capture(e.InnerException).Throw();
-                return null;
-            }
+            return DownloadDirectoryWithResponseAsync(bucketName, s3Directory, localDirectory).GetAwaiter().GetResult();
        }
 
         /// <inheritdoc/>
         public TransferUtilityDownloadDirectoryResponse DownloadDirectoryWithResponse(TransferUtilityDownloadDirectoryRequest request)
         {
-            try
-            {
-                return DownloadDirectoryWithResponseAsync(request).Result;
-            }
-            catch (AggregateException e)
-            {
-                ExceptionDispatchInfo.Capture(e.InnerException).Throw();
-                return null;
-            }
+            return DownloadDirectoryWithResponseAsync(request).GetAwaiter().GetResult();
         }
 
         #endregion
diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryLifecycleTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryLifecycleTests.cs
new file mode 100644
index 000000000000..6e3c70d7eb0d
--- /dev/null
+++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryLifecycleTests.cs
@@ -0,0 +1,309 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Threading;
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using Amazon.S3;
+using Amazon.S3.Model;
+using Amazon.S3.Transfer;
+using Amazon.S3.Util;
+using Amazon.S3.Transfer.Model;
+using AWSSDK_DotNet.IntegrationTests.Utils;
+
+namespace AWSSDK_DotNet.IntegrationTests.Tests.S3
+{
+    /// <summary>
+    /// Integration tests for TransferUtility upload directory lifecycle events.
+    /// Tests the initiated, completed, and failed events for directory uploads.
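+    /// Each test subscribes a TransferLifecycleEventValidator to one of the request's
+    /// lifecycle events, then asserts that the event fired and that its arguments passed
+    /// validation (see AssertEventFired in the Shared Helper Classes region below).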
+    /// </summary>
+    [TestClass]
+    public class TransferUtilityUploadDirectoryLifecycleTests : TestBase
+    {
+        public static readonly long MEG_SIZE = (int)Math.Pow(2, 20);
+        public static readonly long KILO_SIZE = (int)Math.Pow(2, 10);
+        public static readonly string BasePath = Path.Combine(Path.GetTempPath(), "transferutility", "uploaddirectorylifecycle");
+
+        private static string bucketName;
+        private static string plainTextContentType = "text/plain";
+
+        [ClassInitialize()]
+        public static void ClassInitialize(TestContext testContext)
+        {
+            bucketName = S3TestUtils.CreateBucketWithWait(Client);
+        }
+
+        [ClassCleanup]
+        public static void ClassCleanup()
+        {
+            AmazonS3Util.DeleteS3BucketWithObjects(Client, bucketName);
+            BaseClean();
+            if (Directory.Exists(BasePath))
+            {
+                Directory.Delete(BasePath, true);
+            }
+        }
+
+        [TestMethod]
+        [TestCategory("S3")]
+        public void UploadDirectoryInitiatedEventTest()
+        {
+            var eventValidator = new TransferLifecycleEventValidator<UploadDirectoryInitiatedEventArgs>
+            {
+                Validate = (args) =>
+                {
+                    Assert.IsNotNull(args.Request);
+                    Assert.IsNotNull(args.Request.BucketName);
+                    Assert.IsNotNull(args.Request.Directory);
+
+                    // Verify that total files and bytes are provided in initiated event
+                    Assert.IsTrue(args.TotalFiles > 0, "TotalFiles should be greater than 0");
+                    Assert.IsTrue(args.TotalBytes > 0, "TotalBytes should be greater than 0");
+                }
+            };
+            UploadDirectoryWithLifecycleEvents(10 * MEG_SIZE, eventValidator, null, null);
+            eventValidator.AssertEventFired();
+        }
+
+        [TestMethod]
+        [TestCategory("S3")]
+        public void UploadDirectoryCompletedEventTest()
+        {
+            var eventValidator = new TransferLifecycleEventValidator<UploadDirectoryCompletedEventArgs>
+            {
+                Validate = (args) =>
+                {
+                    Assert.IsNotNull(args.Request);
+                    Assert.IsNotNull(args.Response);
+
+                    // Verify progress information is available in completed event
+                    Assert.IsTrue(args.TotalFiles > 0, "TotalFiles should be greater than 0");
+                    Assert.AreEqual(args.TransferredFiles, args.TotalFiles, "All files should be transferred");
+                    Assert.IsTrue(args.TotalBytes > 0, "TotalBytes should be greater than 0");
+                    Assert.AreEqual(args.TransferredBytes, args.TotalBytes, "All bytes should be transferred");
+
+                    // Verify response contains expected data
+                    Assert.AreEqual(args.TransferredFiles, args.Response.ObjectsUploaded, "Response ObjectsUploaded should match TransferredFiles");
+                    Assert.AreEqual(0, args.Response.ObjectsFailed, "No objects should have failed");
+                    Assert.AreEqual(DirectoryResult.Success, args.Response.Result, "Result should be Success");
+                }
+            };
+            UploadDirectoryWithLifecycleEvents(12 * MEG_SIZE, null, eventValidator, null);
+            eventValidator.AssertEventFired();
+        }
+
+        [TestMethod]
+        [TestCategory("S3")]
+        public void UploadDirectoryFailedEventTest()
+        {
+            var eventValidator = new TransferLifecycleEventValidator<UploadDirectoryFailedEventArgs>
+            {
+                Validate = (args) =>
+                {
+                    Assert.IsNotNull(args.Request);
+                }
+            };
+
+            // Use an invalid bucket name to force a real exception.
+            // Bucket names with uppercase letters are invalid and will cause an exception.
+            var invalidBucketName = "INVALID-BUCKET-NAME-" + Guid.NewGuid().ToString();
+
+            var directory = CreateTestDirectory(5 * MEG_SIZE);
+            var directoryPath = directory.FullName;
+
+            var transferUtility = new TransferUtility(Client);
+            var request = new TransferUtilityUploadDirectoryRequest
+            {
+                BucketName = invalidBucketName, // This will cause an exception due to invalid bucket name
+                Directory = directoryPath,
+                KeyPrefix = "test-prefix",
+                SearchPattern = "*",
+                SearchOption = SearchOption.AllDirectories
+            };
+
+            request.UploadDirectoryFailedEvent += eventValidator.OnEventFired;
+
+            // Track the expected failure with a flag so the assertion below is not
+            // swallowed by the catch block (Assert.Fail inside the try would be caught).
+            var exceptionThrown = false;
+            try
+            {
+                transferUtility.UploadDirectory(request);
+            }
+            catch (Exception ex)
+            {
+                // Expected exception - the failed event should have been fired
+                exceptionThrown = true;
+                Console.WriteLine($"Expected exception caught: {ex.GetType().Name} - {ex.Message}");
+            }
+
+            Assert.IsTrue(exceptionThrown, "Expected an exception to be thrown for invalid bucket name");
+            eventValidator.AssertEventFired();
+        }
+
+        [TestMethod]
+        [TestCategory("S3")]
+        public void UploadDirectoryCompleteLifecycleTest()
+        {
+            var initiatedValidator = new TransferLifecycleEventValidator<UploadDirectoryInitiatedEventArgs>
+            {
+                Validate = (args) =>
+                {
+                    Assert.IsNotNull(args.Request);
+                    Assert.AreEqual(bucketName, args.Request.BucketName);
+                    Assert.IsNotNull(args.Request.Directory);
+                    Assert.IsTrue(args.TotalFiles > 0);
+                    Assert.IsTrue(args.TotalBytes > 0);
+                }
+            };
+
+            var completedValidator = new TransferLifecycleEventValidator<UploadDirectoryCompletedEventArgs>
+            {
+                Validate = (args) =>
+                {
+                    Assert.IsNotNull(args.Request);
+                    Assert.IsNotNull(args.Response);
+                    Assert.AreEqual(args.TransferredFiles, args.TotalFiles);
+                    Assert.AreEqual(args.TransferredBytes, args.TotalBytes);
+                    Assert.IsTrue(args.TotalFiles > 0, "Should have uploaded at least one file");
+                    Assert.AreEqual(DirectoryResult.Success, args.Response.Result);
+                }
+            };
+
+            UploadDirectoryWithLifecycleEvents(15 * MEG_SIZE, initiatedValidator, completedValidator, null);
+
+            initiatedValidator.AssertEventFired();
+            completedValidator.AssertEventFired();
+        }
+
+        #region Helper Methods
+
+        void UploadDirectoryWithLifecycleEvents(long fileSize,
+            TransferLifecycleEventValidator<UploadDirectoryInitiatedEventArgs> initiatedValidator,
+            TransferLifecycleEventValidator<UploadDirectoryCompletedEventArgs> completedValidator,
+            TransferLifecycleEventValidator<UploadDirectoryFailedEventArgs> failedValidator)
+        {
+            var directory = CreateTestDirectory(fileSize);
+            var keyPrefix = directory.Name;
+            var directoryPath = directory.FullName;
+
+            UploadDirectoryWithLifecycleEventsAndDirectory(directoryPath, keyPrefix, initiatedValidator, completedValidator, failedValidator);
+        }
+
+        void UploadDirectoryWithLifecycleEventsAndDirectory(string directoryPath, string keyPrefix,
+            TransferLifecycleEventValidator<UploadDirectoryInitiatedEventArgs> initiatedValidator,
+            TransferLifecycleEventValidator<UploadDirectoryCompletedEventArgs> completedValidator,
+            TransferLifecycleEventValidator<UploadDirectoryFailedEventArgs> failedValidator)
+        {
+            var transferUtility = new TransferUtility(Client);
+            var request = new TransferUtilityUploadDirectoryRequest
+            {
+                BucketName = bucketName,
+                Directory = directoryPath,
+                KeyPrefix = keyPrefix,
+                ContentType = plainTextContentType,
+                SearchPattern = "*",
+                SearchOption = SearchOption.AllDirectories
+            };
+
+            if (initiatedValidator != null)
+            {
+                request.UploadDirectoryInitiatedEvent += initiatedValidator.OnEventFired;
+            }
+
+            if (completedValidator != null)
+            {
+                request.UploadDirectoryCompletedEvent += completedValidator.OnEventFired;
+            }
+
+            if (failedValidator != null)
+            {
+                request.UploadDirectoryFailedEvent += failedValidator.OnEventFired;
+            }
+
+            transferUtility.UploadDirectory(request);
+
+            // Validate uploaded directory contents if it was successful
+            var directory = new DirectoryInfo(directoryPath);
+            ValidateDirectoryContentsInS3(Client, bucketName, keyPrefix, directory);
+        }
+
+        public static DirectoryInfo CreateTestDirectory(long fileSize = 0, int numberOfTestFiles = 3)
+        {
+            if (fileSize == 0)
+                fileSize = 1 * MEG_SIZE;
+
+            var directoryPath = GenerateDirectoryPath();
+            for (int i = 0; i < numberOfTestFiles; i++)
+            {
+                var filePath = Path.Combine(Path.Combine(directoryPath, i.ToString()), "file.txt");
+                UtilityMethods.GenerateFile(filePath, fileSize);
+            }
+
+            return new DirectoryInfo(directoryPath);
+        }
+
+        public static string GenerateDirectoryPath(string baseName = "UploadDirectoryLifecycleTest")
+        {
+            var directoryName = UtilityMethods.GenerateName(baseName);
+            var directoryPath = Path.Combine(BasePath, directoryName);
+            return directoryPath;
+        }
+
+        public static void ValidateDirectoryContentsInS3(IAmazonS3 s3client, string bucketName, string keyPrefix, DirectoryInfo sourceDirectory)
+        {
+            var directoryPath = sourceDirectory.FullName;
+            var files = sourceDirectory.GetFiles("*", SearchOption.AllDirectories);
+            foreach (var file in files)
+            {
+                var filePath = file.FullName;
+                var relativePath = filePath.Substring(directoryPath.Length + 1);
+                var key = (!string.IsNullOrEmpty(keyPrefix) ? keyPrefix + "/" : string.Empty) + relativePath.Replace("\\", "/");
+
+                // Verify the object exists in S3
+                var metadata = s3client.GetObjectMetadata(new GetObjectMetadataRequest
+                {
+                    BucketName = bucketName,
+                    Key = key
+                });
+                Assert.IsNotNull(metadata, $"Object {key} should exist in S3");
+                Console.WriteLine($"Validated object exists in S3: {key}");
+            }
+        }
+
+        #endregion
+
+        #region Shared Helper Classes
+
+        class TransferLifecycleEventValidator<T>
+        {
+            public Action<T> Validate { get; set; }
+            public bool EventFired { get; private set; }
+            public Exception EventException { get; private set; }
+
+            public void OnEventFired(object sender, T eventArgs)
+            {
+                try
+                {
+                    Console.WriteLine("Lifecycle Event Fired: {0}", typeof(T).Name);
+                    Validate?.Invoke(eventArgs);
+                    EventFired = true; // Only set if validation passes
+                }
+                catch (Exception ex)
+                {
+                    EventException = ex;
+                    EventFired = false; // Ensure we don't mark as fired on failure
+                    Console.WriteLine("Exception caught in lifecycle event: {0}", ex.Message);
+                    // Don't re-throw, let AssertEventFired() handle it
+                }
+            }
+
+            public void AssertEventFired()
+            {
+                if (EventException != null)
+                    throw EventException;
+                Assert.IsTrue(EventFired, $"{typeof(T).Name} event was not fired");
+            }
+        }
+
+        #endregion
+    }
+}
diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryWithResponseTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryWithResponseTests.cs
new file mode 100644
index 000000000000..dee59bc11a5b
--- /dev/null
+++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityUploadDirectoryWithResponseTests.cs
@@ -0,0 +1,672 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Threading.Tasks;
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using Amazon.S3;
+using Amazon.S3.Model;
+using Amazon.S3.Transfer;
+using Amazon.S3.Transfer.Model;
+using Amazon.S3.Util;
+using AWSSDK_DotNet.IntegrationTests.Utils;
+
+namespace AWSSDK_DotNet.IntegrationTests.Tests.S3
+{
+    /// <summary>
+    /// Integration tests for TransferUtility.UploadDirectoryWithResponseAsync functionality.
+    /// These tests verify end-to-end functionality with actual S3 operations and directory I/O.
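+    /// A fresh bucket is created in ClassInitialize and deleted (along with its objects)
+    /// in ClassCleanup; test directories are created under a per-run temp folder.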
+ /// + /// These integration tests focus on: + /// - Basic directory uploads with response object + /// - Progress tracking with response + /// - Multipart uploads in directory context + /// - Concurrent vs sequential uploads + /// - Nested directory structures + /// - Response validation + /// + [TestClass] + public class TransferUtilityUploadDirectoryWithResponseTests : TestBase + { + private static readonly long MB = 1024 * 1024; + private static readonly long KB = 1024; + private static string bucketName; + private static string tempDirectory; + + [ClassInitialize()] + public static void ClassInitialize(TestContext testContext) + { + bucketName = S3TestUtils.CreateBucketWithWait(Client); + tempDirectory = Path.Combine(Path.GetTempPath(), "S3UploadDirectoryTests-" + Guid.NewGuid().ToString()); + Directory.CreateDirectory(tempDirectory); + } + + [ClassCleanup] + public static void ClassCleanup() + { + AmazonS3Util.DeleteS3BucketWithObjects(Client, bucketName); + + // Clean up temp directory + if (Directory.Exists(tempDirectory)) + { + try + { + Directory.Delete(tempDirectory, recursive: true); + } + catch + { + // Best effort cleanup + } + } + + BaseClean(); + } + + [TestCleanup] + public void TestCleanup() + { + // Clean up any test directories after each test + if (Directory.Exists(tempDirectory)) + { + foreach (var subDir in Directory.GetDirectories(tempDirectory)) + { + try + { + Directory.Delete(subDir, recursive: true); + } + catch + { + // Best effort cleanup + } + } + } + } + + #region Basic Upload Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_BasicUpload_ReturnsCorrectResponse() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("basic-upload"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + var fileCount = 5; + + CreateLocalTestDirectory(uploadPath, 2 * MB, fileCount); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.AreEqual(fileCount, response.ObjectsUploaded, "ObjectsUploaded should match file count"); + Assert.AreEqual(0, response.ObjectsFailed, "ObjectsFailed should be 0"); + Assert.AreEqual(DirectoryResult.Success, response.Result, "Result should be Success"); + + // Verify all files were uploaded to S3 + await VerifyObjectsInS3(keyPrefix, fileCount); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_EmptyDirectory_ReturnsZeroObjectsUploaded() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("empty-directory"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + Directory.CreateDirectory(uploadPath); + + // Act - Upload empty directory + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.AreEqual(0, response.ObjectsUploaded, "ObjectsUploaded 
should be 0 for empty directory"); + Assert.AreEqual(0, response.ObjectsFailed, "ObjectsFailed should be 0"); + Assert.AreEqual(DirectoryResult.Success, response.Result, "Result should be Success"); + } + + #endregion + + #region Progress Tracking Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_WithProgressTracking_FiresProgressEvents() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("progress-tracking"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + var fileCount = 3; + + CreateLocalTestDirectory(uploadPath, 5 * MB, fileCount); + + var progressEvents = new List(); + var progressLock = new object(); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories + }; + + request.UploadDirectoryProgressEvent += (sender, args) => + { + lock (progressLock) + { + progressEvents.Add(args); + } + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response, "Response should not be null"); + Assert.AreEqual(fileCount, response.ObjectsUploaded); + Assert.IsTrue(progressEvents.Count > 0, "Progress events should have fired"); + + // Verify final progress event + var finalEvent = progressEvents.Last(); + Assert.AreEqual(fileCount, finalEvent.NumberOfFilesUploaded); + Assert.AreEqual(fileCount, finalEvent.TotalNumberOfFiles); + Assert.AreEqual(finalEvent.TransferredBytes, finalEvent.TotalBytes); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_WithLifecycleEvents_FiresInitiatedAndCompleted() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("lifecycle-events"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + var fileCount = 3; + + CreateLocalTestDirectory(uploadPath, 2 * MB, fileCount); + + bool initiatedFired = false; + bool completedFired = false; + UploadDirectoryInitiatedEventArgs initiatedArgs = null; + UploadDirectoryCompletedEventArgs completedArgs = null; + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories + }; + + request.UploadDirectoryInitiatedEvent += (sender, args) => + { + initiatedFired = true; + initiatedArgs = args; + }; + + request.UploadDirectoryCompletedEvent += (sender, args) => + { + completedFired = true; + completedArgs = args; + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsTrue(initiatedFired, "Initiated event should have fired"); + Assert.IsTrue(completedFired, "Completed event should have fired"); + + Assert.IsNotNull(initiatedArgs); + Assert.AreEqual(fileCount, initiatedArgs.TotalFiles); + Assert.IsTrue(initiatedArgs.TotalBytes > 0); + + Assert.IsNotNull(completedArgs); + Assert.AreEqual(fileCount, completedArgs.TransferredFiles); + Assert.AreEqual(fileCount, completedArgs.TotalFiles); + Assert.AreEqual(completedArgs.Response, response); + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task 
UploadDirectoryWithResponse_SequentialMode_IncludesCurrentFileDetails() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("sequential-progress"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + + CreateLocalTestDirectory(uploadPath, 3 * MB, 3); + + var progressEvents = new List(); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories, + UploadFilesConcurrently = false // Sequential mode + }; + + request.UploadDirectoryProgressEvent += (sender, args) => + { + progressEvents.Add(args); + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(3, response.ObjectsUploaded); + + // In sequential mode, should have CurrentFile populated + var eventsWithFile = progressEvents.Where(e => e.CurrentFile != null).ToList(); + Assert.IsTrue(eventsWithFile.Count > 0, "Should have events with CurrentFile populated"); + + foreach (var evt in eventsWithFile) + { + Assert.IsNotNull(evt.CurrentFile); + Assert.IsTrue(evt.TotalNumberOfBytesForCurrentFile > 0); + } + } + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + public async Task UploadDirectoryWithResponse_ConcurrentMode_OmitsCurrentFileDetails() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("concurrent-progress"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + + CreateLocalTestDirectory(uploadPath, 3 * MB, 4); + + var progressEvents = new List(); + var progressLock = new object(); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = SearchOption.AllDirectories, + UploadFilesConcurrently = true // Concurrent mode + }; + + request.UploadDirectoryProgressEvent += (sender, args) => + { + lock (progressLock) + { + progressEvents.Add(args); + } + }; + + var response = await transferUtility.UploadDirectoryWithResponseAsync(request); + + // Assert + Assert.IsNotNull(response); + Assert.AreEqual(4, response.ObjectsUploaded); + Assert.IsTrue(progressEvents.Count > 0); + + // In concurrent mode, CurrentFile should be null + foreach (var evt in progressEvents) + { + Assert.IsNull(evt.CurrentFile, "CurrentFile should be null in concurrent mode"); + Assert.AreEqual(0, evt.TransferredBytesForCurrentFile); + Assert.AreEqual(0, evt.TotalNumberOfBytesForCurrentFile); + } + } + + #endregion + + #region Multipart Upload Tests + + [TestMethod] + [TestCategory("S3")] + [TestCategory("UploadDirectory")] + [TestCategory("Multipart")] + public async Task UploadDirectoryWithResponse_WithMultipartFiles_UploadsSuccessfully() + { + // Arrange + var keyPrefix = UtilityMethods.GenerateName("multipart-directory"); + var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload"); + var fileCount = 3; + + // Create directory with large files to trigger multipart (>16MB threshold) + CreateLocalTestDirectory(uploadPath, 20 * MB, fileCount); + + // Act + var transferUtility = new TransferUtility(Client); + var request = new TransferUtilityUploadDirectoryRequest + { + BucketName = bucketName, + Directory = uploadPath, + KeyPrefix = keyPrefix, + SearchPattern = "*", + SearchOption = 
+        #region Multipart Upload Tests
+
+        [TestMethod]
+        [TestCategory("S3")]
+        [TestCategory("UploadDirectory")]
+        [TestCategory("Multipart")]
+        public async Task UploadDirectoryWithResponse_WithMultipartFiles_UploadsSuccessfully()
+        {
+            // Arrange
+            var keyPrefix = UtilityMethods.GenerateName("multipart-directory");
+            var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload");
+            var fileCount = 3;
+
+            // Create directory with large files to trigger multipart (>16MB threshold)
+            CreateLocalTestDirectory(uploadPath, 20 * MB, fileCount);
+
+            // Act
+            var transferUtility = new TransferUtility(Client);
+            var request = new TransferUtilityUploadDirectoryRequest
+            {
+                BucketName = bucketName,
+                Directory = uploadPath,
+                KeyPrefix = keyPrefix,
+                SearchPattern = "*",
+                SearchOption = SearchOption.AllDirectories
+            };
+
+            var response = await transferUtility.UploadDirectoryWithResponseAsync(request);
+
+            // Assert
+            Assert.IsNotNull(response);
+            Assert.AreEqual(fileCount, response.ObjectsUploaded);
+            Assert.AreEqual(0, response.ObjectsFailed);
+            Assert.AreEqual(DirectoryResult.Success, response.Result);
+
+            // Verify all files uploaded with correct sizes
+            await VerifyObjectsInS3WithSize(keyPrefix, fileCount, 20 * MB);
+        }
+
+        #endregion
+
+        #region Nested Directory Tests
+
+        [TestMethod]
+        [TestCategory("S3")]
+        [TestCategory("UploadDirectory")]
+        public async Task UploadDirectoryWithResponse_NestedDirectories_PreservesStructure()
+        {
+            // Arrange
+            var keyPrefix = UtilityMethods.GenerateName("nested-structure");
+            var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload");
+
+            // Create nested directory structure
+            var nestedFiles = new Dictionary<string, long>
+            {
+                { "level1/file1.txt", 1 * MB },
+                { "level1/level2/file2.txt", 2 * MB },
+                { "level1/level2/level3/file3.txt", 3 * MB }
+            };
+
+            CreateLocalTestDirectoryWithStructure(uploadPath, nestedFiles);
+
+            // Act
+            var transferUtility = new TransferUtility(Client);
+            var request = new TransferUtilityUploadDirectoryRequest
+            {
+                BucketName = bucketName,
+                Directory = uploadPath,
+                KeyPrefix = keyPrefix,
+                SearchPattern = "*",
+                SearchOption = SearchOption.AllDirectories
+            };
+
+            var response = await transferUtility.UploadDirectoryWithResponseAsync(request);
+
+            // Assert
+            Assert.IsNotNull(response);
+            Assert.AreEqual(nestedFiles.Count, response.ObjectsUploaded);
+            Assert.AreEqual(0, response.ObjectsFailed);
+
+            // Verify S3 keys have proper structure
+            foreach (var kvp in nestedFiles)
+            {
+                var expectedKey = keyPrefix + "/" + kvp.Key.Replace('\\', '/');
+                await VerifyObjectExistsInS3(expectedKey, kvp.Value);
+            }
+        }
+
+        #endregion
+
+        #region Concurrent vs Sequential Tests
+
+        [TestMethod]
+        [TestCategory("S3")]
+        [TestCategory("UploadDirectory")]
+        public async Task UploadDirectoryWithResponse_ConcurrentMode_UploadsAllFiles()
+        {
+            // Arrange
+            var keyPrefix = UtilityMethods.GenerateName("concurrent-upload");
+            var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload");
+            var fileCount = 10;
+
+            CreateLocalTestDirectory(uploadPath, 2 * MB, fileCount);
+
+            // Act
+            var transferUtility = new TransferUtility(Client);
+            var request = new TransferUtilityUploadDirectoryRequest
+            {
+                BucketName = bucketName,
+                Directory = uploadPath,
+                KeyPrefix = keyPrefix,
+                SearchPattern = "*",
+                SearchOption = SearchOption.AllDirectories,
+                UploadFilesConcurrently = true
+            };
+
+            var response = await transferUtility.UploadDirectoryWithResponseAsync(request);
+
+            // Assert
+            Assert.IsNotNull(response);
+            Assert.AreEqual(fileCount, response.ObjectsUploaded);
+            Assert.AreEqual(0, response.ObjectsFailed);
+
+            await VerifyObjectsInS3(keyPrefix, fileCount);
+        }
+
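The multipart and concurrent-mode tests above both run against TransferUtility defaults: a 16 MB threshold before multipart kicks in (per the comment in the multipart test) and the default request-parallelism limit. Assuming the long-standing `TransferUtilityConfig` properties, a sketch of overriding both for a workload that needs different behavior:

```csharp
// Sketch: overriding the defaults these tests rely on. Values are arbitrary
// illustrations, not recommendations.
var config = new TransferUtilityConfig
{
    ConcurrentServiceRequests = 20,   // parallel uploads when UploadFilesConcurrently = true
    MinSizeBeforePartUpload = 8 * MB  // lower the multipart threshold from its 16 MB default
};
var transferUtility = new TransferUtility(Client, config);
```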
+        [TestMethod]
+        [TestCategory("S3")]
+        [TestCategory("UploadDirectory")]
+        public async Task UploadDirectoryWithResponse_SequentialMode_UploadsAllFiles()
+        {
+            // Arrange
+            var keyPrefix = UtilityMethods.GenerateName("sequential-upload");
+            var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload");
+            var fileCount = 5;
+
+            CreateLocalTestDirectory(uploadPath, 3 * MB, fileCount);
+
+            // Act
+            var transferUtility = new TransferUtility(Client);
+            var request = new TransferUtilityUploadDirectoryRequest
+            {
+                BucketName = bucketName,
+                Directory = uploadPath,
+                KeyPrefix = keyPrefix,
+                SearchPattern = "*",
+                SearchOption = SearchOption.AllDirectories,
+                UploadFilesConcurrently = false
+            };
+
+            var response = await transferUtility.UploadDirectoryWithResponseAsync(request);
+
+            // Assert
+            Assert.IsNotNull(response);
+            Assert.AreEqual(fileCount, response.ObjectsUploaded);
+            Assert.AreEqual(0, response.ObjectsFailed);
+
+            await VerifyObjectsInS3(keyPrefix, fileCount);
+        }
+
+        #endregion
+
+        #region Mixed File Size Tests
+
+        [TestMethod]
+        [TestCategory("S3")]
+        [TestCategory("UploadDirectory")]
+        public async Task UploadDirectoryWithResponse_MixedFileSizes_UploadsAll()
+        {
+            // Arrange
+            var keyPrefix = UtilityMethods.GenerateName("mixed-sizes");
+            var uploadPath = Path.Combine(tempDirectory, keyPrefix + "-upload");
+
+            var mixedFiles = new Dictionary<string, long>
+            {
+                { "tiny.txt", 100 },          // 100 bytes
+                { "small.txt", 512 * KB },    // 512 KB
+                { "medium.txt", 5 * MB },     // 5 MB
+                { "large.txt", 20 * MB }      // 20 MB (multipart)
+            };
+
+            CreateLocalTestDirectoryWithStructure(uploadPath, mixedFiles);
+
+            // Act
+            var transferUtility = new TransferUtility(Client);
+            var request = new TransferUtilityUploadDirectoryRequest
+            {
+                BucketName = bucketName,
+                Directory = uploadPath,
+                KeyPrefix = keyPrefix,
+                SearchPattern = "*",
+                SearchOption = SearchOption.AllDirectories
+            };
+
+            var response = await transferUtility.UploadDirectoryWithResponseAsync(request);
+
+            // Assert
+            Assert.IsNotNull(response);
+            Assert.AreEqual(mixedFiles.Count, response.ObjectsUploaded);
+            Assert.AreEqual(0, response.ObjectsFailed);
+
+            // Verify each file's size in S3
+            foreach (var kvp in mixedFiles)
+            {
+                var s3Key = keyPrefix + "/" + kvp.Key;
+                await VerifyObjectExistsInS3(s3Key, kvp.Value);
+            }
+        }
+
+        #endregion
+
+        #region Helper Methods
+
+        /// <summary>
+        /// Creates a local test directory with the specified number of files.
+        /// </summary>
+        private static void CreateLocalTestDirectory(string directoryPath, long fileSize, int fileCount)
+        {
+            Directory.CreateDirectory(directoryPath);
+
+            for (int i = 0; i < fileCount; i++)
+            {
+                var fileName = $"file{i}.dat";
+                var filePath = Path.Combine(directoryPath, fileName);
+                UtilityMethods.GenerateFile(filePath, fileSize);
+            }
+        }
+
+        /// <summary>
+        /// Creates a local test directory with a specific file structure.
+        /// </summary>
+        private static void CreateLocalTestDirectoryWithStructure(string directoryPath, Dictionary<string, long> files)
+        {
+            foreach (var kvp in files)
+            {
+                var filePath = Path.Combine(directoryPath, kvp.Key.Replace('/', Path.DirectorySeparatorChar));
+                var directory = Path.GetDirectoryName(filePath);
+
+                if (!string.IsNullOrEmpty(directory))
+                {
+                    Directory.CreateDirectory(directory);
+                }
+
+                UtilityMethods.GenerateFile(filePath, kvp.Value);
+            }
+        }
+
+        /// <summary>
+        /// Verifies that the expected number of objects exist in S3 under the given prefix.
+        /// </summary>
+        private static async Task VerifyObjectsInS3(string keyPrefix, int expectedCount)
+        {
+            var listRequest = new ListObjectsV2Request
+            {
+                BucketName = bucketName,
+                Prefix = keyPrefix + "/"
+            };
+
+            var listResponse = await Client.ListObjectsV2Async(listRequest);
+
+            // Filter out directory markers
+            var actualObjects = listResponse.S3Objects
+                .Where(s3o => !s3o.Key.EndsWith("/", StringComparison.Ordinal))
+                .ToList();
+
+            Assert.AreEqual(expectedCount, actualObjects.Count,
+                $"Expected {expectedCount} objects in S3 under prefix '{keyPrefix}', found {actualObjects.Count}");
+        }
+
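One caveat on these listing-based verification helpers: a single `ListObjectsV2Async` call returns at most 1,000 keys, which is ample for these tests but would silently under-count a larger prefix. A paginated variant, sketched under the same static `Client`/`bucketName` conventions (`ListAllObjectsAsync` is a hypothetical helper, not part of this PR):

```csharp
// Sketch: a paginated listing for prefixes that may exceed ListObjectsV2's
// 1,000-key page size; follows NextContinuationToken until the listing ends.
private static async Task<List<S3Object>> ListAllObjectsAsync(string prefix)
{
    var allObjects = new List<S3Object>();
    var listRequest = new ListObjectsV2Request
    {
        BucketName = bucketName,
        Prefix = prefix
    };

    ListObjectsV2Response listResponse;
    do
    {
        listResponse = await Client.ListObjectsV2Async(listRequest);
        allObjects.AddRange(listResponse.S3Objects);
        listRequest.ContinuationToken = listResponse.NextContinuationToken;
    } while (listResponse.IsTruncated == true); // IsTruncated is nullable in recent SDKs

    return allObjects;
}
```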
+        /// <summary>
+        /// Verifies that the expected number of objects exist in S3 with the specified size.
+        /// </summary>
+        private static async Task VerifyObjectsInS3WithSize(string keyPrefix, int expectedCount, long expectedSize)
+        {
+            var listRequest = new ListObjectsV2Request
+            {
+                BucketName = bucketName,
+                Prefix = keyPrefix + "/"
+            };
+
+            var listResponse = await Client.ListObjectsV2Async(listRequest);
+
+            var actualObjects = listResponse.S3Objects
+                .Where(s3o => !s3o.Key.EndsWith("/", StringComparison.Ordinal))
+                .ToList();
+
+            Assert.AreEqual(expectedCount, actualObjects.Count);
+
+            foreach (var s3Object in actualObjects)
+            {
+                Assert.AreEqual(expectedSize, s3Object.Size,
+                    $"Object {s3Object.Key} should be {expectedSize} bytes");
+            }
+        }
+
+        /// <summary>
+        /// Verifies that a specific object exists in S3 with the expected size.
+        /// </summary>
+        private static async Task VerifyObjectExistsInS3(string key, long expectedSize)
+        {
+            var getRequest = new GetObjectMetadataRequest
+            {
+                BucketName = bucketName,
+                Key = key
+            };
+
+            var metadata = await Client.GetObjectMetadataAsync(getRequest);
+
+            Assert.IsNotNull(metadata, $"Object should exist in S3: {key}");
+            Assert.AreEqual(expectedSize, metadata.ContentLength,
+                $"Object {key} should be {expectedSize} bytes");
+        }
+
+        #endregion
+    }
+}