diff --git a/.env b/.env index 85d4b5e..b956163 100644 --- a/.env +++ b/.env @@ -1,3 +1,3 @@ -MINIO_ACCESS_KEY=kJ1sAPIURDyj5HKjeoqq -MINIO_SECRET_KEY=8Sv8jYmW8uOeAwE4c6cimaRnZLcAsfjPI4qxjt9f -MINIO_PUBLIC_ENDPOINT="https://bucket-production-05b8.up.railway.app:443" +MINIO_ACCESS_KEY=minioadmin +MINIO_SECRET_KEY=minioadmin +MINIO_PUBLIC_ENDPOINT="http://localhost:9000" diff --git a/.gitignore b/.gitignore index 4907589..822819d 100644 --- a/.gitignore +++ b/.gitignore @@ -14,5 +14,3 @@ zig-out/ *.so *.dylib *.a -!archive/ -!archive/** diff --git a/README.md b/README.md index c479ab6..a047491 100644 --- a/README.md +++ b/README.md @@ -89,7 +89,6 @@ pub fn main() !void { var client = try s3.S3Client.init(allocator, .{ .access_key_id = "your-key", .secret_access_key = "your-secret", - .region = "us-east-1", // Optional: Use with MinIO or other S3-compatible services // .endpoint = "http://localhost:9000", }); @@ -114,7 +113,6 @@ The main client interface for S3 operations. const client = try s3.S3Client.init(allocator, .{ .access_key_id = "your-key", .secret_access_key = "your-secret", - .region = "us-east-1", .endpoint = "http://localhost:9000", // Optional, for S3-compatible services }); ``` diff --git a/archive/v0.2.0.tar.gz b/archive/v0.2.0.tar.gz deleted file mode 100644 index 2894404..0000000 Binary files a/archive/v0.2.0.tar.gz and /dev/null differ diff --git a/build.zig b/build.zig index b4973c5..4c5286a 100644 --- a/build.zig +++ b/build.zig @@ -16,11 +16,14 @@ pub fn build(b: *std.Build) void { }); // Create the library that others can use as a dependency - const lib = b.addStaticLibrary(.{ + const lib = b.addLibrary(.{ .name = "s3-client", - .root_source_file = b.path("src/s3/lib.zig"), - .target = target, - .optimize = optimize, + .linkage = .static, + .root_module = b.createModule(.{ + .root_source_file = b.path("src/s3/lib.zig"), + .target = target, + .optimize = optimize, + }), }); lib.root_module.addImport("s3", s3_module); 
lib.root_module.addImport("dotenv", dotenv_dep.module("dotenv")); @@ -29,9 +32,11 @@ pub fn build(b: *std.Build) void { // Create the example executable const exe = b.addExecutable(.{ .name = "s3-example", - .root_source_file = b.path("src/main.zig"), - .target = target, - .optimize = optimize, + .root_module = b.createModule(.{ + .root_source_file = b.path("src/main.zig"), + .target = target, + .optimize = optimize, + }), }); exe.root_module.addImport("s3", s3_module); exe.root_module.addImport("dotenv", dotenv_dep.module("dotenv")); @@ -48,9 +53,11 @@ pub fn build(b: *std.Build) void { // Unit tests const unit_tests = b.addTest(.{ - .root_source_file = b.path("src/s3/lib.zig"), - .target = target, - .optimize = optimize, + .root_module = b.createModule(.{ + .root_source_file = b.path("src/s3/lib.zig"), + .target = target, + .optimize = optimize, + }), }); unit_tests.root_module.addImport("dotenv", dotenv_dep.module("dotenv")); const run_unit_tests = b.addRunArtifact(unit_tests); @@ -59,9 +66,11 @@ pub fn build(b: *std.Build) void { // Integration tests const integration_tests = b.addTest(.{ - .root_source_file = b.path("tests/integration/s3_client_test.zig"), - .target = target, - .optimize = optimize, + .root_module = b.createModule(.{ + .root_source_file = b.path("tests/integration/s3_client_test.zig"), + .target = target, + .optimize = optimize, + }), }); integration_tests.root_module.addImport("s3", s3_module); integration_tests.root_module.addImport("dotenv", dotenv_dep.module("dotenv")); diff --git a/build.zig.zon b/build.zig.zon index c79eb51..4ece1fc 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -2,11 +2,11 @@ .name = .zig_s3, .version = "0.2.0", .fingerprint = 0xf58a772c84221497, - .minimum_zig_version = "0.13.0", + .minimum_zig_version = "0.15.1", .dependencies = .{ .dotenv = .{ - .url = "https://github.com/dying-will-bullet/dotenv/archive/refs/tags/v0.2.0.tar.gz", - .hash = "12201347c20e8c4cb161f16bba30e51da17c32b89ef887b9b8932d6ed135ee5a6d01", + 
.url = "git+https://github.com/dying-will-bullet/dotenv?ref=master#82a53e32b25371ab255f150aee089e5f8ffba8c1", + .hash = "dotenv-0.2.2-ikMfwBB3AAC89NhL0EVbn6mRYA5z4fec8PcNKJ-P8m16", }, }, .paths = .{ diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 0000000..1b23a1c --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,6 @@ +services: + minio: + image: minio/minio + ports: + - 9000:9000 + command: "server /data" diff --git a/src/main.zig b/src/main.zig index 8933561..a8a227b 100644 --- a/src/main.zig +++ b/src/main.zig @@ -46,7 +46,6 @@ fn loadEnvVars() !s3.S3Config { return s3.S3Config{ .access_key_id = access_key.?, .secret_access_key = secret_key.?, - .region = "us-west-1", .endpoint = endpoint.?, }; } @@ -88,7 +87,9 @@ pub fn main() !void { } // Print bucket information - const stdout = std.io.getStdOut().writer(); + var stdout_buffer: [1024]u8 = undefined; + var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer); + const stdout = &stdout_writer.interface; try stdout.writeAll("\nAvailable buckets:\n"); for (buckets) |bucket| { std.log.info("Bucket found: {s}", .{bucket.name}); diff --git a/src/root.zig b/src/root.zig deleted file mode 100644 index 1ffb439..0000000 --- a/src/root.zig +++ /dev/null @@ -1,11 +0,0 @@ -const s3 = @import("s3.zig"); - -// Re-export all the types and functions from s3.zig -pub const S3Error = s3.S3Error; -pub const S3Config = s3.S3Config; -pub const S3Client = s3.S3Client; - -test { - // Run all tests in the S3 module - @import("std").testing.refAllDecls(@This()); -} diff --git a/src/s3/bucket/operations.zig b/src/s3/bucket/operations.zig index 118e4c5..3313e5b 100644 --- a/src/s3/bucket/operations.zig +++ b/src/s3/bucket/operations.zig @@ -36,12 +36,11 @@ pub fn createBucket(self: *S3Client, bucket_name: []const u8) !void { defer self.allocator.free(uri_str); std.debug.print("Constructed URI: {s}\n", .{uri_str}); - var req = try self.request(.PUT, try Uri.parse(uri_str), null); - defer 
req.deinit(); + const res = try self.request(.PUT, try Uri.parse(uri_str), .{ .body = "" }); std.debug.print("Sent PUT request to create bucket\n", .{}); - if (req.response.status != .ok and req.response.status != .created) { - switch (req.response.status) { + if (res.status != .ok and res.status != .created) { + switch (res.status) { .conflict => { std.debug.print("Bucket already exists: {s}\n", .{bucket_name}); return S3Error.BucketAlreadyExists; @@ -59,7 +58,7 @@ pub fn createBucket(self: *S3Client, bucket_name: []const u8) !void { return S3Error.ServiceUnavailable; }, else => { - std.debug.print("Failed to create bucket: {s}, status: {}\n", .{ bucket_name, req.response.status }); + std.debug.print("Failed to create bucket: {s}, status: {}\n", .{ bucket_name, res.status }); return S3Error.InvalidResponse; }, } @@ -89,10 +88,8 @@ pub fn deleteBucket(self: *S3Client, bucket_name: []const u8) !void { const uri_str = try fmt.allocPrint(self.allocator, "{s}/{s}", .{ endpoint, bucket_name }); defer self.allocator.free(uri_str); - var req = try self.request(.DELETE, try Uri.parse(uri_str), null); - defer req.deinit(); - - if (req.response.status != .no_content) { + const res = try self.request(.DELETE, try Uri.parse(uri_str), .{}); + if (res.status != .no_content) { return S3Error.InvalidResponse; } } @@ -129,41 +126,40 @@ pub fn listBuckets(self: *S3Client) ![]BucketInfo { try fmt.allocPrint(self.allocator, "https://s3.{s}.amazonaws.com", .{self.config.region}); defer if (self.config.endpoint == null) self.allocator.free(endpoint); - log.debug("Requesting list of buckets from endpoint: {s}", .{endpoint}); - var req = try self.request(.GET, try Uri.parse(endpoint), null); - defer req.deinit(); + var response_writer = std.Io.Writer.Allocating.init(self.allocator); + defer response_writer.deinit(); - switch (req.response.status) { + log.debug("Requesting list of buckets from endpoint: {s}", .{endpoint}); + const res = try self.request(.GET, try Uri.parse(endpoint), .{ 
.response = .{ .body = &response_writer.writer } }); + switch (res.status) { .ok => {}, .unauthorized, .forbidden => { - log.err("Authentication failed: {}", .{req.response.status}); + log.err("Authentication failed: {}", .{res.status}); return S3Error.InvalidCredentials; }, .bad_request => { - log.err("Bad request: {}", .{req.response.status}); + log.err("Bad request: {}", .{res.status}); return S3Error.InvalidResponse; }, else => { - log.err("Unexpected response status: {}", .{req.response.status}); + log.err("Unexpected response status: {}", .{res.status}); return S3Error.InvalidResponse; }, } log.debug("Reading response body", .{}); - const max_size = 1024 * 1024; // 1MB max response size - const body = try req.reader().readAllAlloc(self.allocator, max_size); - defer self.allocator.free(body); + const body = response_writer.written(); log.debug("Raw response: {s}", .{body}); log.debug("Parsing XML response", .{}); - var buckets = std.ArrayList(BucketInfo).init(self.allocator); + var buckets: std.ArrayList(BucketInfo) = .empty; errdefer { for (buckets.items) |bucket| { self.allocator.free(bucket.name); self.allocator.free(bucket.creation_date); } - buckets.deinit(); + buckets.deinit(self.allocator); } var it = std.mem.splitSequence(u8, body, ""); @@ -182,14 +178,157 @@ pub fn listBuckets(self: *S3Client) ![]BucketInfo { const date = try self.allocator.dupe(u8, bucket_xml[date_start + 14 .. 
date_end]); log.debug("Bucket creation date: {s}", .{date}); - try buckets.append(.{ + try buckets.append(self.allocator, .{ .name = name, .creation_date = date, }); } log.info("Found {} buckets total", .{buckets.items.len}); - return buckets.toOwnedSlice(); + return buckets.toOwnedSlice(self.allocator); +} + +/// Object information returned by listObjects +pub const ObjectInfo = struct { + /// Key (path) of the object + key: []const u8, + /// Size of the object in bytes + size: u64, + /// Last modified timestamp as ISO-8601 string + last_modified: []const u8, + /// ETag of the object (usually MD5 of content) + etag: []const u8, + + pub fn deinit(self: *ObjectInfo, alloc: Allocator) void { + alloc.free(self.key); + alloc.free(self.last_modified); + alloc.free(self.etag); + } +}; + +/// Options for listing objects +pub const ListObjectsOptions = struct { + /// Filter objects by prefix + prefix: ?[]const u8 = null, + /// Maximum number of objects to return (1-1000) + max_keys: ?u32 = null, + /// Start listing from this key (for pagination) + start_after: ?[]const u8 = null, +}; + +/// List objects in a bucket. +/// +/// This implements the S3 ListObjectsV2 API. +/// Results are sorted by key in lexicographical order. +/// +/// Parameters: +/// - self: Pointer to initialized S3Client +/// - bucket_name: Name of the bucket to list +/// - options: Optional listing parameters (prefix, pagination) +/// +/// Returns: Slice of ObjectInfo structs. Caller owns the memory. 
+/// +/// Errors: +/// - BucketNotFound: If the bucket doesn't exist +/// - InvalidResponse: If listing fails or response is malformed +/// - ConnectionFailed: Network or connection issues +/// - OutOfMemory: Memory allocation failure +pub fn listObjects( + self: *S3Client, + bucket_name: []const u8, + options: ListObjectsOptions, +) ![]ObjectInfo { + const endpoint = if (self.config.endpoint) |ep| ep else try fmt.allocPrint(self.allocator, "https://s3.{s}.amazonaws.com", .{self.config.region}); + defer if (self.config.endpoint == null) self.allocator.free(endpoint); + + // Build query string + var query: std.ArrayList(u8) = .empty; + defer query.deinit(self.allocator); + + try query.appendSlice(self.allocator, "list-type=2"); // Use ListObjectsV2 + + if (options.prefix) |prefix| { + try query.appendSlice(self.allocator, "&prefix="); + try query.appendSlice(self.allocator, prefix); + } + + if (options.max_keys) |max_keys| { + try query.appendSlice(self.allocator, "&max-keys="); + try query.print(self.allocator, "{d}", .{max_keys}); + } + + if (options.start_after) |start_after| { + try query.appendSlice(self.allocator, "&start-after="); + try query.appendSlice(self.allocator, start_after); + } + + const uri_str = try fmt.allocPrint(self.allocator, "{s}/{s}?{s}", .{ + endpoint, + bucket_name, + query.items, + }); + defer self.allocator.free(uri_str); + + var response_writer = std.Io.Writer.Allocating.init(self.allocator); + defer response_writer.deinit(); + + const res = try self.request(.GET, try Uri.parse(uri_str), .{ .response = .{ .body = &response_writer.writer } }); + if (res.status == .not_found) { + return S3Error.BucketNotFound; + } + if (res.status != .ok) { + return S3Error.InvalidResponse; + } + + // Read response body + const body = response_writer.written(); + + // Parse XML response + var objects: std.ArrayList(ObjectInfo) = .empty; + errdefer { + for (objects.items) |*object| { + object.deinit(self.allocator); + } + objects.deinit(self.allocator); + 
} + + // Simple XML parsing - look for <Contents> elements + var it = std.mem.splitSequence(u8, body, "<Contents>"); + _ = it.first(); // Skip first part before any <Contents> + + while (it.next()) |object_xml| { + // Extract key + const key_start = std.mem.indexOf(u8, object_xml, "<Key>") orelse continue; + const key_end = std.mem.indexOf(u8, object_xml, "</Key>") orelse continue; + const key = try self.allocator.dupe(u8, object_xml[key_start + 5 .. key_end]); + errdefer self.allocator.free(key); + + // Extract size + const size_start = std.mem.indexOf(u8, object_xml, "<Size>") orelse continue; + const size_end = std.mem.indexOf(u8, object_xml, "</Size>") orelse continue; + const size = try std.fmt.parseInt(u64, object_xml[size_start + 6 .. size_end], 10); + + // Extract last modified + const lm_start = std.mem.indexOf(u8, object_xml, "<LastModified>") orelse continue; + const lm_end = std.mem.indexOf(u8, object_xml, "</LastModified>") orelse continue; + const last_modified = try self.allocator.dupe(u8, object_xml[lm_start + 14 .. lm_end]); + errdefer self.allocator.free(last_modified); + + // Extract ETag + const etag_start = std.mem.indexOf(u8, object_xml, "<ETag>") orelse continue; + const etag_end = std.mem.indexOf(u8, object_xml, "</ETag>") orelse continue; + const etag = try self.allocator.dupe(u8, object_xml[etag_start + 6 .. 
etag_end]); + errdefer self.allocator.free(etag); + + try objects.append(self.allocator, .{ + .key = key, + .size = size, + .last_modified = last_modified, + .etag = etag, + }); + } + + return objects.toOwnedSlice(self.allocator); } test "bucket operations" { @@ -197,9 +336,9 @@ test "bucket operations" { // Initialize test client with dummy credentials const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -214,9 +353,9 @@ test "bucket operations error handling" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -240,9 +379,8 @@ test "bucket operations with custom endpoint" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", .endpoint = "http://localhost:9000", }; @@ -259,9 +397,9 @@ test "bucket name validation" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -303,9 +441,9 @@ test "list buckets" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = 
"us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -345,9 +483,8 @@ test "list buckets with custom endpoint" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", .endpoint = "http://localhost:9000", }; @@ -375,9 +512,9 @@ test "list buckets error handling" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "invalid-key", - .secret_access_key = "invalid-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -394,9 +531,9 @@ test "bucket lifecycle with validation" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -458,9 +595,9 @@ test "bucket operations with special characters" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -526,9 +663,9 @@ test "bucket operations concurrency" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = 
"minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -580,9 +717,9 @@ test "bucket operations error cases" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -621,9 +758,9 @@ test "bucket operations with empty strings" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -654,8 +791,9 @@ test "bucket operations region handling" { for (regions) |region| { const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", .region = region, }; diff --git a/src/s3/client/auth/signer.zig b/src/s3/client/auth/signer.zig index 136a99e..92b2061 100644 --- a/src/s3/client/auth/signer.zig +++ b/src/s3/client/auth/signer.zig @@ -30,8 +30,6 @@ /// const credentials = Credentials{ /// .access_key = "AKIAIOSFODNN7EXAMPLE", /// .secret_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", -/// .region = "us-east-1", -/// .service = "s3", /// }; /// /// const params = SigningParams{ @@ -51,7 +49,8 @@ const fmt = std.fmt; const mem = std.mem; const time = std.time; const log = std.log; -const time_utils = @import("time.zig"); + +const UtcDateTime = @import("time.zig").UtcDateTime; /// AWS region for signing const Region = []const u8; @@ -62,7 +61,7 @@ const Service = 
[]const u8; pub const Credentials = struct { access_key: []const u8, secret_key: []const u8, - region: Region, + region: Region = "us-east-1", service: Service = "s3", }; @@ -88,13 +87,14 @@ pub fn signRequest(allocator: Allocator, credentials: Credentials, params: Signi const now = std.time.timestamp(); break :blk @as(i64, @intCast(now)); }; + const dt = UtcDateTime.init(timestamp); // Get the date string in the correct format (YYYYMMDD) - const date_str = try time_utils.formatAmzDate(allocator, timestamp); + const date_str = try dt.formatAmzDate(allocator); defer allocator.free(date_str); // Get the full datetime string for x-amz-date header - const datetime_str = try time_utils.formatAmzDateTime(allocator, timestamp); + const datetime_str = try dt.formatAmz(allocator); defer allocator.free(datetime_str); log.debug("Signing request with date: {s}, datetime: {s}", .{ date_str, datetime_str }); @@ -151,8 +151,8 @@ pub fn signRequest(allocator: Allocator, credentials: Credentials, params: Signi defer allocator.free(signature); // Get signed headers string - var header_names = std.ArrayList([]const u8).init(allocator); - defer header_names.deinit(); + var header_names: std.ArrayList([]const u8) = .empty; + defer header_names.deinit(allocator); defer { for (header_names.items) |name| { allocator.free(name); @@ -162,7 +162,7 @@ pub fn signRequest(allocator: Allocator, credentials: Credentials, params: Signi var header_it = params.headers.iterator(); while (header_it.next()) |entry| { const lower_name = try std.ascii.allocLowerString(allocator, entry.key_ptr.*); - try header_names.append(lower_name); + try header_names.append(allocator, lower_name); } std.mem.sortUnstable([]const u8, header_names.items, {}, struct { @@ -189,29 +189,30 @@ pub fn signRequest(allocator: Allocator, credentials: Credentials, params: Signi /// Create canonical request string for signing fn createCanonicalRequest(allocator: Allocator, params: SigningParams) ![]const u8 { - var canonical = 
std.ArrayList(u8).init(allocator); - errdefer canonical.deinit(); + var canonical: std.ArrayList(u8) = .empty; + errdefer canonical.deinit(allocator); // Add HTTP method (uppercase) - try canonical.appendSlice(params.method); - try canonical.append('\n'); + try canonical.appendSlice(allocator, params.method); + try canonical.append(allocator, '\n'); // Add canonical URI (must be normalized) - try canonical.appendSlice(params.path); - try canonical.append('\n'); + try canonical.appendSlice(allocator, params.path); + try canonical.append(allocator, '\n'); // Add canonical query string (empty for now) - try canonical.append('\n'); + try canonical.append(allocator, '\n'); // Create sorted list of header names for consistent ordering - var header_names = std.ArrayList([]const u8).init(allocator); - defer header_names.deinit(); + var header_names: std.ArrayList([]const u8) = .empty; + defer header_names.deinit(allocator); var header_it = params.headers.iterator(); while (header_it.next()) |entry| { // Convert header names to lowercase const lower_name = try std.ascii.allocLowerString(allocator, entry.key_ptr.*); - try header_names.append(lower_name); + errdefer allocator.free(lower_name); + try header_names.append(allocator, lower_name); } defer { for (header_names.items) |name| { @@ -231,33 +232,35 @@ fn createCanonicalRequest(allocator: Allocator, params: SigningParams) ![]const const value = params.headers.get(name) orelse continue; // Trim and normalize value const trimmed_value = std.mem.trim(u8, value, " \t\r\n"); - try canonical.appendSlice(name); - try canonical.append(':'); - try canonical.appendSlice(trimmed_value); - try canonical.append('\n'); + try canonical.appendSlice(allocator, name); + try canonical.append(allocator, ':'); + try canonical.appendSlice(allocator, trimmed_value); + try canonical.append(allocator, '\n'); } - try canonical.append('\n'); + try canonical.append(allocator, '\n'); // Add signed headers const signed_headers = try 
std.mem.join(allocator, ";", header_names.items); defer allocator.free(signed_headers); - try canonical.appendSlice(signed_headers); - try canonical.append('\n'); + try canonical.appendSlice(allocator, signed_headers); + try canonical.append(allocator, '\n'); // Add payload hash const payload_hash = if (params.body) |body| try hashPayload(allocator, body) else + // SHA256 hash of empty string, pretty funny "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; defer if (params.body != null) allocator.free(payload_hash); - try canonical.appendSlice(payload_hash); + try canonical.appendSlice(allocator, payload_hash); - return canonical.toOwnedSlice(); + return canonical.toOwnedSlice(allocator); } /// Get credential scope string fn getCredentialScope(allocator: Allocator, credentials: Credentials, timestamp: i64) ![]const u8 { - const date = try time_utils.formatDate(allocator, timestamp); + const dt = UtcDateTime.init(timestamp); + const date = try dt.formatAmzDate(allocator); defer allocator.free(date); return fmt.allocPrint( @@ -303,34 +306,34 @@ fn createStringToSign( canonical_request: []const u8, timestamp: i64, ) ![]const u8 { - var result = std.ArrayList(u8).init(allocator); - errdefer result.deinit(); + var result: std.ArrayList(u8) = .empty; + errdefer result.deinit(allocator); // Algorithm - try result.appendSlice("AWS4-HMAC-SHA256\n"); + try result.appendSlice(allocator, "AWS4-HMAC-SHA256\n"); // Get the full datetime string for the second line - const datetime_str = try time_utils.formatAmzDateTime(allocator, timestamp); + const datetime_str = try UtcDateTime.init(timestamp).formatAmz(allocator); defer allocator.free(datetime_str); - try result.appendSlice(datetime_str); - try result.append('\n'); + try result.appendSlice(allocator, datetime_str); + try result.append(allocator, '\n'); // Credential scope - try result.appendSlice(credential_scope); - try result.append('\n'); + try result.appendSlice(allocator, credential_scope); + try 
result.append(allocator, '\n'); // Hashed canonical request var hash: [crypto.hash.sha2.Sha256.digest_length]u8 = undefined; crypto.hash.sha2.Sha256.hash(canonical_request, &hash, .{}); - const hash_hex = try std.fmt.allocPrint(allocator, "{s}", .{std.fmt.fmtSliceHexLower(&hash)}); + const hash_hex = try std.fmt.allocPrint(allocator, "{x}", .{hash}); defer allocator.free(hash_hex); - try result.appendSlice(hash_hex); + try result.appendSlice(allocator, hash_hex); - return result.toOwnedSlice(); + return result.toOwnedSlice(allocator); } /// Calculate request signature using derived signing key -fn calculateSignature( +pub fn calculateSignature( allocator: Allocator, signing_key: []const u8, string_to_sign: []const u8, @@ -340,7 +343,7 @@ fn calculateSignature( crypto.auth.hmac.sha2.HmacSha256.create(&hmac, string_to_sign, signing_key); // Convert to hex - return std.fmt.allocPrint(allocator, "{s}", .{std.fmt.fmtSliceHexLower(&hmac)}); + return std.fmt.allocPrint(allocator, "{x}", .{hmac}); } /// Create final authorization header value @@ -350,7 +353,8 @@ fn createAuthorizationHeader( signature: []const u8, timestamp: i64, ) ![]const u8 { - const date = try time_utils.formatAmzDate(allocator, timestamp); + const dt = UtcDateTime.init(timestamp); + const date = try dt.formatAmzDate(allocator); defer allocator.free(date); return fmt.allocPrint( @@ -380,17 +384,13 @@ fn createCanonicalQueryString(allocator: Allocator, path: []const u8) ![]const u } /// Calculate SHA256 hash of payload -pub fn hashPayload(allocator: Allocator, payload: ?[]const u8) ![]const u8 { +pub fn hashPayload(allocator: Allocator, payload: []const u8) ![]const u8 { var hash: [crypto.hash.sha2.Sha256.digest_length]u8 = undefined; - if (payload) |data| { - crypto.hash.sha2.Sha256.hash(data, &hash, .{}); - } else { - crypto.hash.sha2.Sha256.hash("", &hash, .{}); - } - return std.fmt.allocPrint(allocator, "{s}", .{std.fmt.fmtSliceHexLower(&hash)}); + crypto.hash.sha2.Sha256.hash(payload, &hash, .{}); 
+ return std.fmt.allocPrint(allocator, "{x}", .{hash}); } -fn deriveSigningKey( +pub fn deriveSigningKey( allocator: Allocator, secret_key: []const u8, date_str: []const u8, @@ -435,8 +435,6 @@ test "AWS Signature V4" { const credentials = Credentials{ .access_key = "AKIAIOSFODNN7EXAMPLE", .secret_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", - .region = "us-east-1", - .service = "s3", }; var headers = std.StringHashMap([]const u8).init(allocator); @@ -461,7 +459,7 @@ test "AWS Signature V4" { test "hashPayload empty" { const allocator = std.testing.allocator; - const hash = try hashPayload(allocator, null); + const hash = try hashPayload(allocator, ""); defer allocator.free(hash); // SHA256 of empty string try std.testing.expectEqualStrings( @@ -509,13 +507,14 @@ test "deriveSigningKey" { const credentials = Credentials{ .access_key = "AKIAIOSFODNN7EXAMPLE", .secret_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", - .region = "us-east-1", - .service = "s3", }; const timestamp = 1704067200; // 2024-01-01 00:00:00 UTC + const dt = UtcDateTime.init(timestamp); + const date_str = try dt.formatAmzDate(allocator); + defer allocator.free(date_str); - const key = try deriveSigningKey(allocator, credentials.secret_key, timestamp, credentials.region, credentials.service); + const key = try deriveSigningKey(allocator, credentials.secret_key, date_str, credentials.region, credentials.service); defer allocator.free(key); try std.testing.expect(key.len > 0); @@ -527,8 +526,6 @@ test "signRequest full flow" { const credentials = Credentials{ .access_key = "AKIAIOSFODNN7EXAMPLE", .secret_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", - .region = "us-east-1", - .service = "s3", }; var headers = std.StringHashMap([]const u8).init(allocator); diff --git a/src/s3/client/auth/time.zig b/src/s3/client/auth/time.zig index 9392684..3c18b4e 100644 --- a/src/s3/client/auth/time.zig +++ b/src/s3/client/auth/time.zig @@ -4,67 +4,91 @@ const Allocator = std.mem.Allocator; const fmt = 
std.fmt; const time = std.time; -/// Format timestamp as YYYYMMDD for AWS date -pub fn formatDate(allocator: Allocator, timestamp: i64) ![]const u8 { - const seconds = @as(u64, @intCast(timestamp)); - const epoch_seconds = seconds; - const epoch_days = @divFloor(epoch_seconds, 86400); - var days = @as(u32, @intCast(epoch_days)); - - // Calculate year, month, day - var year: u32 = 1970; - while (days >= 365) { +pub const UtcDateTime = struct { + year: u32, + month: u8, + day: u8, + hour: u8, + minute: u8, + second: u8, + + /// Create a UTC date time from the Unix timestamp (in seconds) + pub fn init(timestamp_secs: i64) UtcDateTime { + const seconds: u64 = @intCast(timestamp_secs); + const day_seconds: u64 = @mod(seconds, 86400); + const epoch_days: u64 = @divFloor(seconds, 86400); + var days: u32 = @intCast(epoch_days); + + // Calculate year, month, day + var year: u32 = 1970; + while (days >= 365) { + const is_leap = (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0); + const days_in_year = if (is_leap) @as(u32, 366) else @as(u32, 365); + if (days < days_in_year) break; + days -= days_in_year; + year += 1; + } + + const month_days = [_]u8{ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; + var month: u8 = 1; const is_leap = (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0); - const days_in_year = if (is_leap) @as(u32, 366) else @as(u32, 365); - if (days < days_in_year) break; - days -= days_in_year; - year += 1; - } - - const month_days = [_]u8{ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; - var month: u32 = 1; - const is_leap = (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0); - for (month_days, 0..) |days_in_month, i| { - var adjusted_days = days_in_month; - if (i == 1 and is_leap) adjusted_days += 1; - if (days < adjusted_days) break; - days -= adjusted_days; - month += 1; + for (month_days, 0..) 
|days_in_month, i| { + var adjusted_days = days_in_month; + if (i == 1 and is_leap) adjusted_days += 1; + if (days < adjusted_days) break; + days -= adjusted_days; + month += 1; + } + + return .{ + .year = year, + .month = month, + .day = @intCast(days + 1), + .hour = @intCast(@divFloor(day_seconds, 3600)), + .minute = @intCast(@divFloor(@mod(day_seconds, 3600), 60)), + .second = @intCast(@mod(day_seconds, 60)), + }; } - const day = days + 1; + pub fn now() UtcDateTime { + return .init(std.time.timestamp()); + } - // Format as YYYYMMDD - return fmt.allocPrint( - allocator, - "{d:0>4}{d:0>2}{d:0>2}", - .{ year, month, day }, - ); -} + /// Format timestamp in ISO standard YYYY-MM-DD'T'HH:MI:SS'Z' + pub fn format(self: *const UtcDateTime, alloc: Allocator) ![]const u8 { + return fmt.allocPrint( + alloc, + "{d:0>4}-{d:0>2}-{d:0>2}T{d:0>2}:{d:0>2}:{d:0>2}Z", + .{ self.year, self.month, self.day, self.hour, self.minute, self.second }, + ); + } -/// Format timestamp as YYYYMMDD'T'HHMMSS'Z' for AWS -pub fn formatAmzDateTime(allocator: Allocator, timestamp: i64) ![]const u8 { - const date = try formatDate(allocator, timestamp); - defer allocator.free(date); + /// Format timestamp as YYYYMMDD for AWS date + pub fn formatAmzDate(self: *const UtcDateTime, alloc: Allocator) ![]const u8 { + return fmt.allocPrint( + alloc, + "{d:0>4}{d:0>2}{d:0>2}", + .{ self.year, self.month, self.day }, + ); + } - const seconds = @as(u64, @intCast(timestamp)); - const day_seconds = @mod(seconds, 86400); - const hour = @divFloor(day_seconds, 3600); - const minute = @divFloor(@mod(day_seconds, 3600), 60); - const second = @mod(day_seconds, 60); - - return fmt.allocPrint( - allocator, - "{s}T{d:0>2}{d:0>2}{d:0>2}Z", - .{ date, hour, minute, second }, - ); -} + /// Format timestamp as YYYYMMDD'T'HHMMSS'Z' for AWS + pub fn formatAmz(self: *const UtcDateTime, alloc: Allocator) ![]const u8 { + return fmt.allocPrint( + alloc, + "{d:0>4}{d:0>2}{d:0>2}T{d:0>2}{d:0>2}{d:0>2}Z", + .{ self.year, self.month, 
self.day, self.hour, self.minute, self.second }, + ); + } -/// Format timestamp as YYYYMMDD for AWS credential scope -pub fn formatAmzDate(allocator: Allocator, timestamp: i64) ![]const u8 { - return formatDate(allocator, timestamp); -} + pub fn jsonStringify(self: *const UtcDateTime, jws: anytype) !void { + try jws.print( + "\"{d:0>4}-{d:0>2}-{d:0>2}T{d:0>2}:{d:0>2}:{d:0>2}Z\"", + .{ self.year, self.month, self.day, self.hour, self.minute, self.second }, + ); + } +}; /// Check if a year is a leap year fn isLeapYear(year: u16) bool { @@ -77,11 +101,12 @@ test "time formatting" { // Test case: 2013-05-24T00:00:00Z (1369353600) const timestamp: i64 = 1369353600; - const datetime = try formatAmzDateTime(allocator, timestamp); + const dt = UtcDateTime.init(timestamp); + const datetime = try dt.formatAmz(allocator); defer allocator.free(datetime); try std.testing.expectEqualStrings("20130524T000000Z", datetime); - const date = try formatAmzDate(allocator, timestamp); + const date = try dt.formatAmzDate(allocator); defer allocator.free(date); try std.testing.expectEqualStrings("20130524", date); } diff --git a/src/s3/client/implementation.zig b/src/s3/client/implementation.zig index e9069cd..1d5d868 100644 --- a/src/s3/client/implementation.zig +++ b/src/s3/client/implementation.zig @@ -12,7 +12,7 @@ const HttpClient = http.Client; const lib = @import("../lib.zig"); const signer = @import("auth/signer.zig"); -const time_utils = @import("auth/time.zig"); +const UtcDateTime = @import("auth/time.zig").UtcDateTime; const S3Error = lib.S3Error; /// Configuration for the S3 client. 
@@ -23,7 +23,7 @@ pub const S3Config = struct { /// AWS secret access key or compatible credential secret_access_key: []const u8, /// AWS region (e.g., "us-east-1") - region: []const u8, + region: []const u8 = "us-east-1", /// Optional custom endpoint for S3-compatible services (e.g., MinIO, LocalStack) endpoint: ?[]const u8 = null, }; @@ -75,6 +75,14 @@ pub const S3Client = struct { self.allocator.destroy(self); } + const RequestOptions = struct { + body: ?[]const u8 = null, + response: struct { + head: ?*http.Client.Response.Head = null, + body: ?*std.io.Writer = null, + } = .{}, + }; + /// Generic HTTP request handler used by all S3 operations. /// Handles request setup, authentication, and execution. /// @@ -88,8 +96,8 @@ pub const S3Client = struct { self: *S3Client, method: http.Method, uri: Uri, - body: ?[]const u8, - ) !http.Client.Request { + opts: RequestOptions, + ) !http.Client.FetchResult { log.debug("Starting S3 request: method={s}", .{@tagName(method)}); // Create headers map for signing @@ -115,16 +123,15 @@ pub const S3Client = struct { try headers.put("host", uri_host); // Calculate content hash - const content_hash = try signer.hashPayload(self.allocator, body); + const content_hash = try signer.hashPayload(self.allocator, opts.body orelse ""); defer self.allocator.free(content_hash); try headers.put("x-amz-content-sha256", content_hash); // Get current timestamp and format it properly - const now = std.time.timestamp(); - const timestamp = @as(i64, @intCast(now)); + const timestamp: i64 = std.time.timestamp(); // Format current time as x-amz-date header - const amz_date = try time_utils.formatAmzDateTime(self.allocator, timestamp); + const amz_date = try UtcDateTime.init(timestamp).formatAmz(self.allocator); defer self.allocator.free(amz_date); try headers.put("x-amz-date", amz_date); @@ -134,14 +141,13 @@ pub const S3Client = struct { .access_key = self.config.access_key_id, .secret_key = self.config.secret_access_key, .region = 
self.config.region, - .service = "s3", }; const params = signer.SigningParams{ .method = @tagName(method), .path = uri_path, .headers = headers, - .body = body, + .body = opts.body, .timestamp = timestamp, // Use same timestamp for signing }; @@ -151,35 +157,72 @@ pub const S3Client = struct { log.debug("Generated auth header: {s}", .{auth_header}); - var server_header_buffer: [8192]u8 = undefined; - var req = try self.http_client.open(method, uri, .{ - .server_header_buffer = &server_header_buffer, + // MinIO isn't sending Content-Length for DELETE operations. + // This results in the fetch hanging until the socket times out (~30s). + const keep_alive: bool = method.responseHasBody() and method != .DELETE; + + var req = try self.http_client.request(method, uri, .{ + .redirect_behavior = .not_allowed, + .headers = .{ + .host = .{ .override = uri_host }, + .content_type = .{ .override = "application/xml" }, + }, .extra_headers = &[_]http.Header{ .{ .name = "Accept", .value = "application/xml" }, .{ .name = "x-amz-content-sha256", .value = content_hash }, .{ .name = "x-amz-date", .value = amz_date }, .{ .name = "Authorization", .value = auth_header }, }, + .keep_alive = keep_alive, }); - errdefer req.deinit(); + defer req.deinit(); + + if (opts.body) |payload| { + req.transfer_encoding = .{ .content_length = payload.len }; + var b = try req.sendBody(&.{}); + try b.writer.writeAll(payload); + try b.end(); + } else { + try req.sendBodiless(); + } - req.headers.host = .{ .override = uri_host }; - req.headers.content_type = .{ .override = "application/xml" }; + var response = try req.receiveHead(&.{}); - if (body) |b| { - req.transfer_encoding = .{ .content_length = b.len }; + if (opts.response.head) |response_head| { + // Dupe underlying head bytes and re-parse + const head_bytes = try self.allocator.dupe(u8, response.head.bytes); + response_head.* = try http.Client.Response.Head.parse(head_bytes); } - try req.send(); + const response_writer = opts.response.body orelse 
{ + const reader = response.reader(&.{}); + // TODO: Can remove this check after this is fixed: https://codeberg.org/ziglang/zig/issues/30070 + if (reader != std.Io.Reader.ending) { + _ = reader.discardRemaining() catch |err| switch (err) { + error.ReadFailed => return response.bodyErr().?, + }; + } + return .{ .status = response.head.status }; + }; - if (body) |b| { - try req.writeAll(b); - } + const decompress_buffer: []u8 = switch (response.head.content_encoding) { + .identity => &.{}, + .zstd => try self.allocator.alloc(u8, std.compress.zstd.default_window_len), + .deflate, .gzip => try self.allocator.alloc(u8, std.compress.flate.max_window_len), + .compress => return error.UnsupportedCompressionMethod, + }; + defer self.allocator.free(decompress_buffer); + + var transfer_buffer: [64]u8 = undefined; + var decompress: http.Decompress = undefined; + const reader = response.readerDecompressing(&transfer_buffer, &decompress, decompress_buffer); - try req.finish(); - try req.wait(); + _ = reader.streamRemaining(response_writer) catch |err| switch (err) { + error.ReadFailed => return response.bodyErr().?, + else => |e| return e, + }; - return req; + return .{ .status = response.head.status }; } }; @@ -187,34 +230,53 @@ test "S3Client request signing" { const allocator = std.testing.allocator; const config = S3Config{ - .access_key_id = "AKIAIOSFODNN7EXAMPLE", - .secret_access_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var client = try S3Client.init(allocator, config); defer client.deinit(); + var head: http.Client.Response.Head = undefined; + defer allocator.free(head.bytes); + + var response_writer = std.Io.Writer.Allocating.init(allocator); + defer response_writer.deinit(); + const uri = try Uri.parse("https://examplebucket.s3.amazonaws.com/test.txt"); - var req = try client.request(.GET, uri, null); - defer req.deinit(); + 
_ = try client.request(.GET, uri, .{ .response = .{ .head = &head, .body = &response_writer.writer } }); + + var contains_authorization: bool = false; + var contains_content_sha256: bool = false; + var contains_date: bool = false; + + var it = head.iterateHeaders(); + while (it.next()) |header| { + if (std.ascii.eqlIgnoreCase(header.name, "authorization")) { + contains_authorization = true; + } else if (std.ascii.eqlIgnoreCase(header.name, "x-amz-content-sha256")) { + contains_content_sha256 = true; + } else if (std.ascii.eqlIgnoreCase(header.name, "x-amz-date")) { + contains_date = true; + } + } // Verify authorization header is present - try std.testing.expect(req.headers.contains("authorization")); + try std.testing.expect(contains_authorization); // Verify required AWS headers are present - try std.testing.expect(req.headers.contains("x-amz-content-sha256")); - try std.testing.expect(req.headers.contains("x-amz-date")); + try std.testing.expect(contains_content_sha256); + try std.testing.expect(contains_date); } test "S3Client initialization" { const allocator = std.testing.allocator; const config = S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", - .endpoint = null, + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var client = try S3Client.init(allocator, config); @@ -229,9 +291,8 @@ test "S3Client custom endpoint" { const allocator = std.testing.allocator; const config = S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", .endpoint = "http://localhost:9000", }; @@ -245,9 +306,9 @@ test "S3Client request with body" { const allocator = std.testing.allocator; const config = S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + 
.secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var client = try S3Client.init(allocator, config); @@ -255,36 +316,56 @@ test "S3Client request with body" { const uri = try Uri.parse("https://example.s3.amazonaws.com/test.txt"); const body = "Hello, S3!"; - var req = try client.request(.PUT, uri, body); - defer req.deinit(); - try std.testing.expect(req.headers.contains("authorization")); - try std.testing.expect(req.headers.contains("x-amz-content-sha256")); - try std.testing.expect(req.headers.contains("x-amz-date")); - try std.testing.expect(req.transfer_encoding.content_length == body.len); + var head: http.Client.Response.Head = undefined; + defer allocator.free(head.bytes); + + _ = try client.request(.PUT, uri, .{ .body = body, .response = .{ .head = &head } }); + + var contains_authorization: bool = false; + var contains_content_sha256: bool = false; + var contains_date: bool = false; + + var it = head.iterateHeaders(); + while (it.next()) |header| { + if (std.ascii.eqlIgnoreCase(header.name, "authorization")) { + contains_authorization = true; + } else if (std.ascii.eqlIgnoreCase(header.name, "x-amz-content-sha256")) { + contains_content_sha256 = true; + } else if (std.ascii.eqlIgnoreCase(header.name, "x-amz-date")) { + contains_date = true; + } + } + + try std.testing.expect(contains_authorization); + try std.testing.expect(contains_content_sha256); + try std.testing.expect(contains_date); + try std.testing.expect(head.content_length == body.len); } test "S3Client error handling" { const allocator = std.testing.allocator; const config = S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var client = try S3Client.init(allocator, config); defer client.deinit(); const uri = try Uri.parse("https://example.s3.amazonaws.com/test.txt"); - var req = try client.request(.GET, 
uri, null); - defer req.deinit(); + const res = try client.request(.GET, uri, .{}); + _ = res; + + // ??? // Test error mapping - switch (req.response.status) { - .unauthorized => try std.testing.expectError(S3Error.InvalidCredentials, S3Error.InvalidCredentials), - .forbidden => try std.testing.expectError(S3Error.InvalidCredentials, S3Error.InvalidCredentials), - .not_found => try std.testing.expectError(S3Error.BucketNotFound, S3Error.BucketNotFound), - else => {}, - } + // switch (res.status) { + // .unauthorized => try std.testing.expectError(S3Error.InvalidCredentials, S3Error.InvalidCredentials), + // .forbidden => try std.testing.expectError(S3Error.InvalidCredentials, S3Error.InvalidCredentials), + // .not_found => try std.testing.expectError(S3Error.BucketNotFound, S3Error.BucketNotFound), + // else => {}, + // } } diff --git a/src/s3/get_policy.zig b/src/s3/get_policy.zig new file mode 100644 index 0000000..fcb82d2 --- /dev/null +++ b/src/s3/get_policy.zig @@ -0,0 +1,736 @@ +const std = @import("std"); +const eql = std.mem.eql; +const Allocator = std.mem.Allocator; +const ArenaAllocator = std.heap.ArenaAllocator; + +const signer = @import("client/auth/signer.zig"); +const UtcDateTime = @import("client/auth/time.zig").UtcDateTime; +const S3Config = @import("client/implementation.zig").S3Config; + +const expect = std.testing.expect; + +// std.log.debug doesn't show up in tests? +const debug_logging = false; + +const Self = @This(); + +// DOCS: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html + +_alloc: Allocator, + +config: *const S3Config, + +request: PresignedRequest, + +params: PolicyParams, + +pub const PresignedRequest = struct { + timestamp: ?i64 = null, + expires: u32 = 3600, + /// bucket URL + url: []const u8, + /// file.txt + object: []const u8, +}; + +const PolicyParams = struct { + /// Identifies the version of AWS Signature and the algorithm that you used to calculate the signature. 
+ /// + /// For AWS Signature Version 4, you set this parameter value to AWS4-HMAC-SHA256. + /// This string identifies AWS Signature Version 4 (AWS4) and the HMAC-SHA256 algorithm (HMAC-SHA256). + @"X-Amz-Algorithm": []const u8 = "AWS4-HMAC-SHA256", + + /// In addition to your access key ID, this parameter also provides scope (AWS Region and service) for which the signature is valid. + /// This value must match the scope you use in signature calculations, discussed in the following section. + /// The general form for this parameter value is as follows: + /// + /// ////aws4_request + /// For example: + /// + /// AKIAIOSFODNN7EXAMPLE/20130721/us-east-1/s3/aws4_request + /// For Amazon S3, the AWS-service string is s3. + /// For a list of S3 AWS-region strings, see Regions and Endpoints in the AWS General Reference. + @"X-Amz-Credential": []const u8 = "", + + /// The date and time format must follow the ISO 8601 standard, and must be formatted with the "yyyyMMddTHHmmssZ" format. + /// For example if the date and time was "08/01/2016 15:32:41.982-700" then it must first be converted + /// to UTC (Coordinated Universal Time) and then submitted as "20160801T223241Z". + @"X-Amz-Date": []const u8 = "", + + /// Provides the time period, in seconds, for which the generated presigned URL is valid. + /// For example, 86400 (24 hours). This value is an integer. + /// The minimum value you can set is 1, and the maximum is 604800 (seven days). + /// + /// A presigned URL can be valid for a maximum of seven days because the signing key you use in signature calculation is valid for up to seven days. + @"X-Amz-Expires": []const u8 = "", + + /// Lists the headers that you used to calculate the signature. The following headers are required in the signature calculations: + /// + /// The HTTP 'host' header. + /// + /// Any x-amz-* headers that you plan to add to the request. 
+ /// + /// Note + /// For added security, you should sign all the request headers that you plan to include in your request. + @"X-Amz-SignedHeaders": []const u8 = "host", + + /// Provides the signature to authenticate your request. + /// This signature must match the signature Amazon S3 calculates; otherwise, Amazon S3 denies the request. + /// For example, 733255ef022bec3f2a8701cd61d4b371f3f28c9f193a1f02279211d48d5193d7 + @"X-Amz-Signature": []const u8 = "", +}; + +pub fn init(alloc: Allocator, config: *const S3Config, request: PresignedRequest) Self { + return .{ + ._alloc = alloc, + .config = config, + .request = request, + .params = .{}, + }; +} + +/// Example: AKIAIOSFODNN7EXAMPLE/0130524/us-east-1/s3/aws4_request +pub fn getAmzCred(alloc: Allocator, access_key: []const u8, date: []const u8, region: []const u8) ![]const u8 { + const cred: []const u8 = try std.fmt.allocPrint( + alloc, + "{s}/{s}/{s}/s3/aws4_request", + .{ access_key, date, region }, + ); + return cred; +} + +/// Expiration time must be between 1 second and 7 days +pub fn getAmzExpires(alloc: Allocator, seconds: u32) ![]const u8 { + if (!(seconds >= 1 and seconds <= 604800)) return error.ExpiresOutsideBounds; + const seconds_str = try std.fmt.allocPrint( + alloc, + "{d}", + .{seconds}, + ); + return seconds_str; +} + +/// Example: 20130524/us-east-1/s3/aws4_request +pub fn getAmzScope(alloc: Allocator, date: []const u8, region: []const u8) ![]const u8 { + const scope: []const u8 = try std.fmt.allocPrint( + alloc, + "{s}/{s}/s3/aws4_request", + .{ date, region }, + ); + return scope; +} + +// Called from presign(), expects an arena allocator +pub fn createCanonicalRequest(self: *Self, alloc: Allocator, method: []const u8, host: []const u8, object: []const u8) ![]const u8 { + var canonical: std.ArrayList(u8) = try .initCapacity(alloc, 512); + + // Add HTTP method (uppercase) + try canonical.appendSlice(alloc, method); + try canonical.append(alloc, '\n'); + + // Add canonical URI (must be 
normalized) + try canonical.appendSlice(alloc, object); + try canonical.append(alloc, '\n'); + + // NOTE: These are the required params. + // If we want to support adding optional params + // this should be made dynamic (requires sorting) + const params = [_][2][]const u8{ + .{ "X-Amz-Algorithm", self.params.@"X-Amz-Algorithm" }, + .{ "X-Amz-Credential", self.params.@"X-Amz-Credential" }, + .{ "X-Amz-Date", self.params.@"X-Amz-Date" }, + .{ "X-Amz-Expires", self.params.@"X-Amz-Expires" }, + .{ "X-Amz-SignedHeaders", self.params.@"X-Amz-SignedHeaders" }, + }; + + const query_str = try buildPercentEncodedQuery(alloc, ¶ms); + + // Add canonical query string + try canonical.appendSlice(alloc, query_str); + try canonical.append(alloc, '\n'); + + try canonical.appendSlice(alloc, "host:"); + try canonical.appendSlice(alloc, host); + try canonical.append(alloc, '\n'); + try canonical.append(alloc, '\n'); + try canonical.appendSlice(alloc, "host"); + try canonical.append(alloc, '\n'); + try canonical.appendSlice(alloc, "UNSIGNED-PAYLOAD"); + + if (debug_logging) { + std.debug.print("canonical_request:\n\n{s}\n\n", .{canonical.items}); + } + return try canonical.toOwnedSlice(alloc); +} + +// Called from presign(), expects an arena allocator +fn buildPercentEncodedQuery(alloc: Allocator, params: []const [2][]const u8) ![]const u8 { + var aw: std.io.Writer.Allocating = try .initCapacity(alloc, 512); + defer aw.deinit(); + + for (params, 0..) 
|param, i| { + if (i > 0) try aw.writer.writeByte('&'); + try percentEncode(&aw.writer, param[0]); + try aw.writer.writeByte('='); + try percentEncode(&aw.writer, param[1]); + } + + return aw.toOwnedSlice(); +} + +pub fn hashCanonicalRequest(alloc: Allocator, canonical_request: []const u8) ![]const u8 { + const hashed = try signer.hashPayload(alloc, canonical_request); + + if (debug_logging) { + std.debug.print("hashed:\n\n{s}\n\n", .{hashed}); + } + return hashed; +} + +fn createStringToSign(alloc: Allocator, timestamp_8601: []const u8, scope: []const u8, canonical_request_hash: []const u8) ![]const u8 { + var string_to_sign: std.ArrayList(u8) = try .initCapacity(alloc, 512); + + try string_to_sign.appendSlice(alloc, "AWS4-HMAC-SHA256"); + try string_to_sign.append(alloc, '\n'); + try string_to_sign.appendSlice(alloc, timestamp_8601); + try string_to_sign.append(alloc, '\n'); + try string_to_sign.appendSlice(alloc, scope); + try string_to_sign.append(alloc, '\n'); + try string_to_sign.appendSlice(alloc, canonical_request_hash); + + if (debug_logging) { + std.debug.print("string to sign:\n\n{s}\n\n", .{string_to_sign.items}); + } + return try string_to_sign.toOwnedSlice(alloc); +} + +/// Valid characters in a query string value '/' is encoded as %2F. +fn isValidQueryChar(c: u8) bool { + return switch (c) { + 'A'...'Z', + 'a'...'z', + '0'...'9', + '-', + '_', + '.', + '~', + => true, + else => false, + }; +} + +fn percentEncode(writer: *std.Io.Writer, value: []const u8) !void { + try std.Uri.Component.percentEncode(writer, value, isValidQueryChar); +} + +// Called from presign(), expects an arena allocator +fn buildQuery(self: *Self, alloc: Allocator) ![]const u8 { + var query: std.ArrayList(u8) = try .initCapacity(alloc, 512); + + try query.append(alloc, '?'); + + // NOTE: These are the required params. 
+ // If we want to support adding optional params (in X-Amz-SignedHeaders) + // this should be made dynamic (requires sorting) + const params = [_][2][]const u8{ + .{ "X-Amz-Algorithm", self.params.@"X-Amz-Algorithm" }, + .{ "X-Amz-Credential", self.params.@"X-Amz-Credential" }, + .{ "X-Amz-Date", self.params.@"X-Amz-Date" }, + .{ "X-Amz-Expires", self.params.@"X-Amz-Expires" }, + .{ "X-Amz-SignedHeaders", self.params.@"X-Amz-SignedHeaders" }, + .{ "X-Amz-Signature", self.params.@"X-Amz-Signature" }, + }; + + const query_str = try buildPercentEncodedQuery(alloc, ¶ms); + + try query.appendSlice(alloc, query_str); + + if (debug_logging) { + std.debug.print("query:\n\n{s}\n\n", .{query.items}); + } + return try query.toOwnedSlice(alloc); +} + +/// Presigns the GET Policy +pub fn presign(self: *Self) ![]const u8 { + var arena: ArenaAllocator = .init(self._alloc); + defer arena.deinit(); + const alloc: Allocator = arena.allocator(); + + const config = self.config; + const request = self.request; + + const timestamp = request.timestamp orelse std.time.timestamp(); + + const dt = UtcDateTime.init(timestamp); + const date_str = try dt.formatAmzDate(alloc); + const date_time_8601 = try dt.formatAmz(alloc); + + const cred = try getAmzCred(alloc, config.access_key_id, date_str, config.region); + + const expires = try getAmzExpires(alloc, request.expires); + + self.params = .{ + .@"X-Amz-Credential" = cred, + .@"X-Amz-Date" = date_time_8601, + .@"X-Amz-Expires" = expires, + }; + + const uri = try std.Uri.parse(request.url); + const host = try std.fmt.allocPrint(alloc, "{f}", .{uri.fmt(.{ + .authentication = true, + .authority = true, + .fragment = true, + .port = true, + })}); + + var aw: std.io.Writer.Allocating = try .initCapacity(alloc, 512); + + try aw.writer.print( + "{f}", + .{uri.fmt(.{ + .path = true, + })}, + ); + + const written = aw.written(); + if (written.len > 1) { + try aw.writer.writeByte('/'); + } + + try aw.writer.print("{s}", .{request.object}); + + const 
obj = try aw.toOwnedSlice(); + + if (debug_logging) { + std.debug.print("debug host:\n\n{s}\n\n", .{host}); + } + + if (debug_logging) { + std.debug.print("debug obj:\n\n{s}\n\n", .{obj}); + } + + const canonical_request = try self.createCanonicalRequest(alloc, "GET", host, obj); + + const hashed_canonical_request = try hashCanonicalRequest(alloc, canonical_request); + + const scope = try getAmzScope(alloc, date_str, config.region); + + const string_to_sign = try createStringToSign(alloc, date_time_8601, scope, hashed_canonical_request); + + // Calculate signature + const signature: []const u8 = sig: { + const signing_key = try signer.deriveSigningKey( + alloc, + config.secret_access_key, + date_str, + config.region, + "s3", + ); + + break :sig try signer.calculateSignature(alloc, signing_key, string_to_sign); + }; + self.params.@"X-Amz-Signature" = signature; + + const query = try self.buildQuery(alloc); + + var get_url: std.ArrayList(u8) = .empty; + + try get_url.appendSlice(alloc, request.url); + if (request.url[request.url.len - 1] != '/') { + try get_url.appendSlice(alloc, "/"); + } + + try get_url.appendSlice(alloc, request.object); + try get_url.appendSlice(alloc, query); + + if (debug_logging) { + std.debug.print("get_url:\n\n{s}\n\n", .{get_url.items}); + } + + return try self._alloc.dupe(u8, get_url.items); +} + +// Note: run `zig test src/s3/get_policy.zig --test-filter "get_policy"` +// to only run tests in this file +const TestPolicy = struct { + policy: Self, + date: []const u8, + date_time_8601: []const u8, + region: []const u8, + cred: []const u8, + expires: []const u8, + + pub fn deinit(self: *TestPolicy, alloc: Allocator) void { + alloc.free(self.date); + alloc.free(self.date_time_8601); + alloc.free(self.cred); + alloc.free(self.expires); + } +}; + +fn buildTestPolicy(alloc: Allocator, timestamp: i64, expires_seconds: u32) !TestPolicy { + const dt = UtcDateTime.init(timestamp); + const date = try dt.formatAmzDate(alloc); + errdefer 
alloc.free(date); + + const date_time_8601 = try dt.formatAmz(alloc); + errdefer alloc.free(date_time_8601); + + const cred = try getAmzCred(alloc, "AKIAIOSFODNN7EXAMPLE", date, "us-east-1"); + errdefer alloc.free(cred); + + const expires = try getAmzExpires(alloc, expires_seconds); + errdefer alloc.free(expires); + + const params: PolicyParams = .{ + .@"X-Amz-Credential" = cred, + .@"X-Amz-Date" = date_time_8601, + .@"X-Amz-Expires" = expires, + }; + + const config: S3Config = .{ + .access_key_id = "AKIAIOSFODNN7EXAMPLE", + .secret_access_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + .endpoint = "https://examplebucket.s3.amazonaws.com", + }; + + const request = PresignedRequest{ + .timestamp = timestamp, + .expires = expires_seconds, + .url = "https://examplebucket.s3.amazonaws.com", + .object = "test.txt", + }; + + return .{ + .date = date, + .date_time_8601 = date_time_8601, + .region = "us-east-1", + .cred = cred, + .expires = expires, + .policy = .{ + ._alloc = alloc, + .config = &config, + .request = request, + .params = params, + }, + }; +} + +test "test X-Amz-Algorithm" { + const alloc = std.testing.allocator; + + var tp = try buildTestPolicy(alloc, 1369353600, 86400); + defer tp.deinit(alloc); + + try expect(eql(u8, tp.policy.params.@"X-Amz-Algorithm", "AWS4-HMAC-SHA256")); +} + +test "test UtcDateTime.formatAmzDate()" { + const alloc = std.testing.allocator; + + const dt = UtcDateTime.init(1771693969); + const date = try dt.formatAmzDate(alloc); + defer alloc.free(date); + + try expect(eql(u8, date, "20260221")); +} + +test "test UtcDateTime.formatAmz()" { + const alloc = std.testing.allocator; + + const dt = UtcDateTime.init(1771693969); + const date_time_8601 = try dt.formatAmz(alloc); + defer alloc.free(date_time_8601); + // Expected 8601 format: "yyyyMMddTHHmmssZ" + try expect(eql(u8, date_time_8601, "20260221T171249Z")); +} + +test "test getAmzCred" { + const alloc = std.testing.allocator; + const access_key = "AKIAIOSFODNN7EXAMPLE"; + + const 
dt = UtcDateTime.init(1771693969); + const date = try dt.formatAmzDate(alloc); + defer alloc.free(date); + + const region = "us-east-1"; + + const cred = try getAmzCred(alloc, access_key, date, region); + defer alloc.free(cred); + + try expect(eql(u8, cred, "AKIAIOSFODNN7EXAMPLE/20260221/us-east-1/s3/aws4_request")); +} + +test "test getAmzScope" { + const alloc = std.testing.allocator; + + const dt = UtcDateTime.init(1771693969); + const date = try dt.formatAmzDate(alloc); + defer alloc.free(date); + + const region = "us-east-1"; + + const cred = try getAmzScope(alloc, date, region); + defer alloc.free(cred); + + try expect(eql(u8, cred, "20260221/us-east-1/s3/aws4_request")); +} + +test "test getAmzExpires" { + const alloc = std.testing.allocator; + const expires = try getAmzExpires(alloc, 1000); + defer alloc.free(expires); + try expect(std.mem.eql(u8, expires, "1000")); + + _ = getAmzExpires(alloc, 0) catch |err| { + try expect(err == error.ExpiresOutsideBounds); + }; + + _ = getAmzExpires(alloc, 1_000_000) catch |err| { + try expect(err == error.ExpiresOutsideBounds); + }; +} + +test "createCanonicalRequest" { + var arena: ArenaAllocator = .init(std.testing.allocator); + defer arena.deinit(); + const alloc: Allocator = arena.allocator(); + + var tp = try buildTestPolicy(alloc, 1369353600, 86400); + + const host = "examplebucket.s3.amazonaws.com"; + + const canonical_request = try tp.policy.createCanonicalRequest(alloc, "GET", host, "/test.txt"); + + const canonical_request_test = + \\GET + \\/test.txt + \\X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host + \\host:examplebucket.s3.amazonaws.com + \\ + \\host + \\UNSIGNED-PAYLOAD + ; + + try std.testing.expect(canonical_request.len == canonical_request_test.len); + try std.testing.expectEqualStrings(canonical_request, canonical_request_test); +} + +test "hash canonical request" 
{ + var arena: ArenaAllocator = .init(std.testing.allocator); + defer arena.deinit(); + const alloc: Allocator = arena.allocator(); + + var tp = try buildTestPolicy(alloc, 1369353600, 86400); + + const host = "examplebucket.s3.amazonaws.com"; + + const canonical_request = try tp.policy.createCanonicalRequest(alloc, "GET", host, "/test.txt"); + + const canonical_request_test = + \\GET + \\/test.txt + \\X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host + \\host:examplebucket.s3.amazonaws.com + \\ + \\host + \\UNSIGNED-PAYLOAD + ; + + try std.testing.expect(canonical_request.len == canonical_request_test.len); + try std.testing.expectEqualStrings(canonical_request, canonical_request_test); + + const hashed_canonical_request = try hashCanonicalRequest(alloc, canonical_request); + + const expected_hash = "3bfa292879f6447bbcda7001decf97f4a54dc650c8942174ae0a9121cf58ad04"; + + try std.testing.expectEqualStrings(expected_hash, hashed_canonical_request); +} + +test "string to sign" { + var arena: ArenaAllocator = .init(std.testing.allocator); + defer arena.deinit(); + const alloc: Allocator = arena.allocator(); + + var tp = try buildTestPolicy(alloc, 1369353600, 86400); + + const host = "examplebucket.s3.amazonaws.com"; + + const canonical_request = try tp.policy.createCanonicalRequest(alloc, "GET", host, "/test.txt"); + + const canonical_request_test = + \\GET + \\/test.txt + \\X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host + \\host:examplebucket.s3.amazonaws.com + \\ + \\host + \\UNSIGNED-PAYLOAD + ; + + try std.testing.expect(canonical_request.len == canonical_request_test.len); + try std.testing.expectEqualStrings(canonical_request, canonical_request_test); + + const hashed_canonical_request = try 
hashCanonicalRequest(alloc, canonical_request); + + const expected_hash = "3bfa292879f6447bbcda7001decf97f4a54dc650c8942174ae0a9121cf58ad04"; + + try std.testing.expectEqualStrings(expected_hash, hashed_canonical_request); + + const scope = try getAmzScope(alloc, tp.date, tp.region); + + const string_to_sign = try createStringToSign(alloc, tp.date_time_8601, scope, hashed_canonical_request); + + const expected_string_to_sign = + \\AWS4-HMAC-SHA256 + \\20130524T000000Z + \\20130524/us-east-1/s3/aws4_request + \\3bfa292879f6447bbcda7001decf97f4a54dc650c8942174ae0a9121cf58ad04 + ; + + try std.testing.expectEqualStrings(expected_string_to_sign, string_to_sign); +} + +test "signature" { + var arena: ArenaAllocator = .init(std.testing.allocator); + defer arena.deinit(); + const alloc: Allocator = arena.allocator(); + + var tp = try buildTestPolicy(alloc, 1369353600, 86400); + defer tp.deinit(alloc); + + const host = "examplebucket.s3.amazonaws.com"; + const canonical_request = try tp.policy.createCanonicalRequest(alloc, "GET", host, "/test.txt"); + + const canonical_request_test = + \\GET + \\/test.txt + \\X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host + \\host:examplebucket.s3.amazonaws.com + \\ + \\host + \\UNSIGNED-PAYLOAD + ; + + try std.testing.expect(canonical_request.len == canonical_request_test.len); + try std.testing.expectEqualStrings(canonical_request, canonical_request_test); + + const hashed_canonical_request = try hashCanonicalRequest(alloc, canonical_request); + + const expected_hash = "3bfa292879f6447bbcda7001decf97f4a54dc650c8942174ae0a9121cf58ad04"; + + try std.testing.expectEqualStrings(expected_hash, hashed_canonical_request); + + const scope = try getAmzScope(alloc, tp.date, tp.region); + + const string_to_sign = try createStringToSign(alloc, tp.date_time_8601, scope, hashed_canonical_request); + + const 
expected_string_to_sign = + \\AWS4-HMAC-SHA256 + \\20130524T000000Z + \\20130524/us-east-1/s3/aws4_request + \\3bfa292879f6447bbcda7001decf97f4a54dc650c8942174ae0a9121cf58ad04 + ; + + try std.testing.expectEqualStrings(expected_string_to_sign, string_to_sign); + const secret_access_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"; + // Calculate signature + const signature: []const u8 = sig: { + const signing_key = try signer.deriveSigningKey( + alloc, + secret_access_key, + tp.date, + tp.region, + "s3", + ); + + break :sig try signer.calculateSignature(alloc, signing_key, string_to_sign); + }; + + const expected_signature = "aeeed9bbccd4d02ee5c0109b86d86835f995330da4c265957d157751f604d404"; + + try std.testing.expectEqualStrings(expected_signature, signature); +} + +test "buildQuery" { + var arena: ArenaAllocator = .init(std.testing.allocator); + defer arena.deinit(); + const alloc: Allocator = arena.allocator(); + + var tp = try buildTestPolicy(alloc, 1369353600, 86400); + const host = "examplebucket.s3.amazonaws.com"; + + const canonical_request = try tp.policy.createCanonicalRequest(alloc, "GET", host, "/test.txt"); + + const canonical_request_test = + \\GET + \\/test.txt + \\X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host + \\host:examplebucket.s3.amazonaws.com + \\ + \\host + \\UNSIGNED-PAYLOAD + ; + + try std.testing.expect(canonical_request.len == canonical_request_test.len); + try std.testing.expectEqualStrings(canonical_request, canonical_request_test); + + const hashed_canonical_request = try hashCanonicalRequest(alloc, canonical_request); + + const expected_hash = "3bfa292879f6447bbcda7001decf97f4a54dc650c8942174ae0a9121cf58ad04"; + + try std.testing.expectEqualStrings(expected_hash, hashed_canonical_request); + + const scope = try getAmzScope(alloc, tp.date, tp.region); + + const string_to_sign = try 
createStringToSign(alloc, tp.date_time_8601, scope, hashed_canonical_request); + + const expected_string_to_sign = + \\AWS4-HMAC-SHA256 + \\20130524T000000Z + \\20130524/us-east-1/s3/aws4_request + \\3bfa292879f6447bbcda7001decf97f4a54dc650c8942174ae0a9121cf58ad04 + ; + + try std.testing.expectEqualStrings(expected_string_to_sign, string_to_sign); + const secret_access_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"; + // Calculate signature + const signature: []const u8 = sig: { + const signing_key = try signer.deriveSigningKey( + alloc, + secret_access_key, + tp.date, + tp.region, + "s3", + ); + + break :sig try signer.calculateSignature(alloc, signing_key, string_to_sign); + }; + + const expected_signature = "aeeed9bbccd4d02ee5c0109b86d86835f995330da4c265957d157751f604d404"; + + try std.testing.expectEqualStrings(expected_signature, signature); + + tp.policy.params.@"X-Amz-Signature" = signature; + + const query = try tp.policy.buildQuery(alloc); + + const expected_query = "?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=aeeed9bbccd4d02ee5c0109b86d86835f995330da4c265957d157751f604d404"; + + try std.testing.expectEqualStrings(expected_query, query); +} + +test "get_policy presign" { + const alloc = std.testing.allocator; + + var tp = try buildTestPolicy(alloc, 1369353600, 86400); + defer tp.deinit(alloc); + + const presigned_get_url = try tp.policy.presign(); + defer alloc.free(presigned_get_url); + + const expected_presigned_get_url = "https://examplebucket.s3.amazonaws.com/test.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=aeeed9bbccd4d02ee5c0109b86d86835f995330da4c265957d157751f604d404"; + + try 
std.testing.expectEqualStrings(expected_presigned_get_url, presigned_get_url); +} diff --git a/src/s3/lib.zig b/src/s3/lib.zig index 9db701c..6de6fdf 100644 --- a/src/s3/lib.zig +++ b/src/s3/lib.zig @@ -12,7 +12,6 @@ //! var client = try S3Client.init(allocator, .{ //! .access_key_id = "your-key", //! .secret_access_key = "your-secret", -//! .region = "us-east-1", //! }); //! defer client.deinit(); //! @@ -25,6 +24,9 @@ const client = @import("client/implementation.zig"); const bucket_ops = @import("bucket/operations.zig"); const object_ops = @import("object/operations.zig"); +pub const PostPolicy = @import("post_policy.zig"); +pub const GetPolicy = @import("get_policy.zig"); + /// Possible errors that can occur during S3 operations. /// These errors cover both AWS-specific issues and general HTTP/network problems. pub const S3Error = error{ @@ -61,10 +63,13 @@ pub const S3Config = client.S3Config; pub const BucketInfo = bucket_ops.BucketInfo; /// Information about an object in S3 -pub const ObjectInfo = object_ops.ObjectInfo; +pub const ObjectInfo = bucket_ops.ObjectInfo; /// Options for listing objects in a bucket -pub const ListObjectsOptions = object_ops.ListObjectsOptions; +pub const ListObjectsOptions = bucket_ops.ListObjectsOptions; + +/// Metadata about an object in S3 +pub const ObjectMetadata = object_ops.ObjectMetadata; /// Helper struct for uploading different types of content to S3 pub const ObjectUploader = object_ops.ObjectUploader; @@ -124,12 +129,36 @@ pub const S3Client = struct { return bucket_ops.listBuckets(self.inner); } + /// List objects in a bucket with optional filtering and pagination. + /// Memory for the returned slice and its contents must be freed by the caller. 
+ /// + /// Arguments: + /// bucket_name: Name of the bucket to list + /// options: Optional parameters for filtering and pagination + /// + /// Returns: Slice of ObjectInfo structs + /// + /// Errors: + /// BucketNotFound: If the bucket doesn't exist + /// InvalidResponse: If listing fails + /// ConnectionFailed: Network or connection issues + /// OutOfMemory: Memory allocation failure + pub fn listObjects(self: *S3Client, bucket_name: []const u8, options: ListObjectsOptions) ![]ObjectInfo { + return bucket_ops.listObjects(self.inner, bucket_name, options); + } + /// Upload an object to S3. /// See object/operations.zig for details. pub fn putObject(self: *S3Client, bucket_name: []const u8, key: []const u8, data: []const u8) !void { return object_ops.putObject(self.inner, bucket_name, key, data); } + /// Retrieves metadata for an object from S3. + /// See object/operations.zig for details. + pub fn headObject(self: *S3Client, bucket_name: []const u8, key: []const u8) !ObjectMetadata { + return object_ops.headObject(self.inner, bucket_name, key); + } + /// Download an object from S3. /// See object/operations.zig for details. pub fn getObject(self: *S3Client, bucket_name: []const u8, key: []const u8) ![]const u8 { @@ -142,26 +171,13 @@ pub const S3Client = struct { return object_ops.deleteObject(self.inner, bucket_name, key); } - /// List objects in a bucket with optional filtering and pagination. - /// Memory for the returned slice and its contents must be freed by the caller. 
- /// - /// Arguments: - /// bucket_name: Name of the bucket to list - /// options: Optional parameters for filtering and pagination - /// - /// Returns: Slice of ObjectInfo structs - /// - /// Errors: - /// BucketNotFound: If the bucket doesn't exist - /// InvalidResponse: If listing fails - /// ConnectionFailed: Network or connection issues - /// OutOfMemory: Memory allocation failure - pub fn listObjects(self: *S3Client, bucket_name: []const u8, options: ListObjectsOptions) ![]ObjectInfo { - return object_ops.listObjects(self.inner, bucket_name, options); - } - /// Create an object uploader helper for this client pub fn uploader(self: *S3Client) ObjectUploader { return ObjectUploader.init(self.inner); } }; + +test { + // Run all tests in the S3 module + @import("std").testing.refAllDecls(@This()); +} diff --git a/src/s3/object/operations.zig b/src/s3/object/operations.zig index d469b25..e84fccd 100644 --- a/src/s3/object/operations.zig +++ b/src/s3/object/operations.zig @@ -13,6 +13,13 @@ const bucket_ops = @import("../bucket/operations.zig"); const S3Error = lib.S3Error; const S3Client = client_impl.S3Client; +fn object_url(client: *const S3Client, bucket_name: []const u8, key: []const u8) ![]const u8 { + const endpoint = if (client.config.endpoint) |ep| ep else try fmt.allocPrint(client.allocator, "https://s3.{s}.amazonaws.com", .{client.config.region}); + defer if (client.config.endpoint == null) client.allocator.free(endpoint); + + return try fmt.allocPrint(client.allocator, "{s}/{s}/{s}", .{ endpoint, bucket_name, key }); +} + /// Upload an object to S3. /// /// Currently supports objects up to the size of available memory. 
@@ -30,223 +37,149 @@ const S3Client = client_impl.S3Client; /// - ConnectionFailed: Network or connection issues /// - OutOfMemory: Memory allocation failure pub fn putObject(self: *S3Client, bucket_name: []const u8, key: []const u8, data: []const u8) !void { - const endpoint = if (self.config.endpoint) |ep| ep else try fmt.allocPrint(self.allocator, "https://s3.{s}.amazonaws.com", .{self.config.region}); - defer if (self.config.endpoint == null) self.allocator.free(endpoint); - - const uri_str = try fmt.allocPrint(self.allocator, "{s}/{s}/{s}", .{ endpoint, bucket_name, key }); + const uri_str = try object_url(self, bucket_name, key); defer self.allocator.free(uri_str); - var req = try self.request(.PUT, try Uri.parse(uri_str), data); - defer req.deinit(); - - if (req.response.status != .ok) { + const res = try self.request(.PUT, try Uri.parse(uri_str), .{ .body = data }); + if (res.status != .ok) { return S3Error.InvalidResponse; } } -/// Download an object from S3. -/// -/// Currently limited to objects up to 1MB in size. -/// For larger objects, streaming download support is needed (TODO). +/// Object metadata returned by headObject +pub const ObjectMetadata = struct { + /// Size of the object (in bytes) + size: u64, + /// Last modified timestamp as ISO-8601 string + last_modified: []const u8, + /// ETag of the object (usually MD5 of content) + etag: []const u8, + /// The MIME type of the object's content. + content_type: []const u8, + + pub fn deinit(self: *ObjectMetadata, alloc: Allocator) void { + alloc.free(self.last_modified); + alloc.free(self.etag); + alloc.free(self.content_type); + } +}; + +/// Retrieves metadata for an object from S3. /// /// Parameters: /// - self: Pointer to initialized S3Client /// - bucket_name: Name of the bucket containing the object /// - key: Object key (path) in the bucket /// -/// Returns: Object content as a slice. Caller owns the memory. +/// Returns: Object metadata. 
/// /// Errors: /// - ObjectNotFound: If the object doesn't exist /// - BucketNotFound: If the bucket doesn't exist -/// - InvalidResponse: If download fails +/// - InvalidResponse: If request fails /// - ConnectionFailed: Network or connection issues /// - OutOfMemory: Memory allocation failure -pub fn getObject(self: *S3Client, bucket_name: []const u8, key: []const u8) ![]const u8 { - const endpoint = if (self.config.endpoint) |ep| ep else try fmt.allocPrint(self.allocator, "https://s3.{s}.amazonaws.com", .{self.config.region}); - defer if (self.config.endpoint == null) self.allocator.free(endpoint); - - const uri_str = try fmt.allocPrint(self.allocator, "{s}/{s}/{s}", .{ endpoint, bucket_name, key }); +pub fn headObject(self: *S3Client, bucket_name: []const u8, key: []const u8) !ObjectMetadata { + const uri_str = try object_url(self, bucket_name, key); defer self.allocator.free(uri_str); - var req = try self.request(.GET, try Uri.parse(uri_str), null); - defer req.deinit(); + var head: http.Client.Response.Head = undefined; + defer self.allocator.free(head.bytes); - if (req.response.status == .not_found) { + const res = try self.request(.HEAD, try Uri.parse(uri_str), .{ .response = .{ .head = &head } }); + if (res.status == .not_found) { return S3Error.ObjectNotFound; } - if (req.response.status != .ok) { + if (res.status != .ok) { return S3Error.InvalidResponse; } - // TODO: Support streaming for large objects - return try req.reader().readAllAlloc(self.allocator, 1024 * 1024); // 1MB max + var last_modified: []const u8 = ""; + errdefer self.allocator.free(last_modified); + + var etag: []const u8 = ""; + errdefer self.allocator.free(etag); + + var it = head.iterateHeaders(); + while (it.next()) |header| { + if (std.ascii.eqlIgnoreCase(header.name, "last-modified")) { + last_modified = try self.allocator.dupe(u8, header.value); + } else if (std.ascii.eqlIgnoreCase(header.name, "etag")) { + etag = try self.allocator.dupe(u8, header.value); + } + } + + if 
(last_modified.len == 0 or etag.len == 0) return error.InvalidResponse; + + const content_type = try self.allocator.dupe(u8, head.content_type orelse return error.InvalidResponse); + errdefer self.allocator.free(content_type); + + return .{ + .size = head.content_length orelse return error.InvalidResponse, + .last_modified = last_modified, + .etag = etag, + .content_type = content_type, + }; } -/// Delete an object from S3. +/// Download an object from S3. /// -/// This operation cannot be undone unless versioning is enabled on the bucket. +/// Currently limited to objects up to 1MB in size. +/// For larger objects, streaming download support is needed (TODO). /// /// Parameters: /// - self: Pointer to initialized S3Client /// - bucket_name: Name of the bucket containing the object -/// - key: Object key (path) to delete +/// - key: Object key (path) in the bucket +/// +/// Returns: Object content as a slice. Caller owns the memory. /// /// Errors: -/// - InvalidResponse: If deletion fails +/// - ObjectNotFound: If the object doesn't exist /// - BucketNotFound: If the bucket doesn't exist +/// - InvalidResponse: If download fails /// - ConnectionFailed: Network or connection issues /// - OutOfMemory: Memory allocation failure -pub fn deleteObject(self: *S3Client, bucket_name: []const u8, key: []const u8) !void { - const endpoint = if (self.config.endpoint) |ep| ep else try fmt.allocPrint(self.allocator, "https://s3.{s}.amazonaws.com", .{self.config.region}); - defer if (self.config.endpoint == null) self.allocator.free(endpoint); - - const uri_str = try fmt.allocPrint(self.allocator, "{s}/{s}/{s}", .{ endpoint, bucket_name, key }); +pub fn getObject(self: *S3Client, bucket_name: []const u8, key: []const u8) ![]const u8 { + const uri_str = try object_url(self, bucket_name, key); defer self.allocator.free(uri_str); - var req = try self.request(.DELETE, try Uri.parse(uri_str), null); - defer req.deinit(); + var response_writer = 
std.Io.Writer.Allocating.init(self.allocator); + defer response_writer.deinit(); - if (req.response.status != .no_content) { + const res = try self.request(.GET, try Uri.parse(uri_str), .{ .response = .{ .body = &response_writer.writer } }); + if (res.status == .not_found) { + return S3Error.ObjectNotFound; + } + if (res.status != .ok) { return S3Error.InvalidResponse; } -} -/// Object information returned by listObjects -pub const ObjectInfo = struct { - /// Key (path) of the object - key: []const u8, - /// Size of the object in bytes - size: u64, - /// Last modified timestamp as ISO-8601 string - last_modified: []const u8, - /// ETag of the object (usually MD5 of content) - etag: []const u8, -}; - -/// Options for listing objects -pub const ListObjectsOptions = struct { - /// Filter objects by prefix - prefix: ?[]const u8 = null, - /// Maximum number of objects to return (1-1000) - max_keys: ?u32 = null, - /// Start listing from this key (for pagination) - start_after: ?[]const u8 = null, -}; + return response_writer.toOwnedSlice(); +} -/// List objects in a bucket. +/// Delete an object from S3. /// -/// This implements the S3 ListObjectsV2 API. -/// Results are sorted by key in lexicographical order. +/// This operation cannot be undone unless versioning is enabled on the bucket. /// /// Parameters: /// - self: Pointer to initialized S3Client -/// - bucket_name: Name of the bucket to list -/// - options: Optional listing parameters (prefix, pagination) -/// -/// Returns: Slice of ObjectInfo structs. Caller owns the memory. 
+/// - bucket_name: Name of the bucket containing the object +/// - key: Object key (path) to delete /// /// Errors: +/// - InvalidResponse: If deletion fails /// - BucketNotFound: If the bucket doesn't exist -/// - InvalidResponse: If listing fails or response is malformed /// - ConnectionFailed: Network or connection issues /// - OutOfMemory: Memory allocation failure -pub fn listObjects( - self: *S3Client, - bucket_name: []const u8, - options: ListObjectsOptions, -) ![]ObjectInfo { - const endpoint = if (self.config.endpoint) |ep| ep else try fmt.allocPrint(self.allocator, "https://s3.{s}.amazonaws.com", .{self.config.region}); - defer if (self.config.endpoint == null) self.allocator.free(endpoint); - - // Build query string - var query = std.ArrayList(u8).init(self.allocator); - defer query.deinit(); - - try query.appendSlice("list-type=2"); // Use ListObjectsV2 - - if (options.prefix) |prefix| { - try query.appendSlice("&prefix="); - try query.appendSlice(prefix); - } - - if (options.max_keys) |max_keys| { - try query.appendSlice("&max-keys="); - try query.writer().print("{d}", .{max_keys}); - } - - if (options.start_after) |start_after| { - try query.appendSlice("&start-after="); - try query.appendSlice(start_after); - } - - const uri_str = try fmt.allocPrint(self.allocator, "{s}/{s}?{s}", .{ - endpoint, - bucket_name, - query.items, - }); +pub fn deleteObject(self: *S3Client, bucket_name: []const u8, key: []const u8) !void { + const uri_str = try object_url(self, bucket_name, key); defer self.allocator.free(uri_str); - var req = try self.request(.GET, try Uri.parse(uri_str), null); - defer req.deinit(); - - if (req.response.status == .not_found) { - return S3Error.BucketNotFound; - } - if (req.response.status != .ok) { + const res = try self.request(.DELETE, try Uri.parse(uri_str), .{}); + if (res.status != .no_content) { return S3Error.InvalidResponse; } - - // Read response body - const max_size = 1024 * 1024; // 1MB max response size - const body = try 
req.reader().readAllAlloc(self.allocator, max_size); - defer self.allocator.free(body); - - // Parse XML response - var objects = std.ArrayList(ObjectInfo).init(self.allocator); - errdefer { - for (objects.items) |object| { - self.allocator.free(object.key); - self.allocator.free(object.last_modified); - self.allocator.free(object.etag); - } - objects.deinit(); - } - - // Simple XML parsing - look for elements - var it = std.mem.splitSequence(u8, body, ""); - _ = it.first(); // Skip first part before any - - while (it.next()) |object_xml| { - // Extract key - const key_start = std.mem.indexOf(u8, object_xml, "") orelse continue; - const key_end = std.mem.indexOf(u8, object_xml, "") orelse continue; - const key = try self.allocator.dupe(u8, object_xml[key_start + 5 .. key_end]); - - // Extract size - const size_start = std.mem.indexOf(u8, object_xml, "") orelse continue; - const size_end = std.mem.indexOf(u8, object_xml, "") orelse continue; - const size = try std.fmt.parseInt(u64, object_xml[size_start + 6 .. size_end], 10); - - // Extract last modified - const lm_start = std.mem.indexOf(u8, object_xml, "") orelse continue; - const lm_end = std.mem.indexOf(u8, object_xml, "") orelse continue; - const last_modified = try self.allocator.dupe(u8, object_xml[lm_start + 13 .. lm_end]); - - // Extract ETag - const etag_start = std.mem.indexOf(u8, object_xml, "") orelse continue; - const etag_end = std.mem.indexOf(u8, object_xml, "") orelse continue; - const etag = try self.allocator.dupe(u8, object_xml[etag_start + 6 .. 
etag_end]); - - try objects.append(.{ - .key = key, - .size = size, - .last_modified = last_modified, - .etag = etag, - }); - } - - return objects.toOwnedSlice(); } pub const ObjectUploader = struct { @@ -306,15 +239,14 @@ pub const ObjectUploader = struct { key: []const u8, data: anytype, ) !void { - // Create a buffer for JSON string - var buffer = std.ArrayList(u8).init(self.client.allocator); - defer buffer.deinit(); + var writer = std.io.Writer.Allocating.init(self.client.allocator); + defer writer.deinit(); // Serialize to JSON - try std.json.stringify(data, .{}, buffer.writer()); + try std.json.Stringify.value(data, .{}, &writer.writer); // Upload the JSON data - try putObject(self.client, bucket_name, key, buffer.items); + try putObject(self.client, bucket_name, key, writer.written()); } }; @@ -322,15 +254,15 @@ test "upload different types" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); defer test_client.deinit(); - var uploader = ObjectUploader.init(&test_client); + var uploader = ObjectUploader.init(test_client); // File upload try uploader.uploadFile( @@ -362,9 +294,9 @@ test "list objects basic" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -392,7 +324,7 @@ test "list objects basic" { } // List all objects - const objects = try listObjects(test_client, bucket_name, .{}); + const objects = try bucket_ops.listObjects(test_client, bucket_name, .{}); defer { for (objects) 
|object| { allocator.free(object.key); @@ -422,9 +354,9 @@ test "list objects with prefix" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -452,7 +384,7 @@ test "list objects with prefix" { } // List objects with prefix - const objects = try listObjects(test_client, bucket_name, .{ + const objects = try bucket_ops.listObjects(test_client, bucket_name, .{ .prefix = "folder1/", }); defer { @@ -476,9 +408,9 @@ test "list objects pagination" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -508,7 +440,7 @@ test "list objects pagination" { } // List first page (2 objects) - const page1 = try listObjects(test_client, bucket_name, .{ + const page1 = try bucket_ops.listObjects(test_client, bucket_name, .{ .max_keys = 2, }); defer { @@ -523,7 +455,7 @@ test "list objects pagination" { try std.testing.expectEqual(@as(usize, 2), page1.len); // List second page using start_after - const page2 = try listObjects(test_client, bucket_name, .{ + const page2 = try bucket_ops.listObjects(test_client, bucket_name, .{ .max_keys = 2, .start_after = page1[1].key, }); @@ -545,9 +477,9 @@ test "list objects error cases" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var 
test_client = try S3Client.init(allocator, config); @@ -556,13 +488,13 @@ test "list objects error cases" { // Test non-existent bucket try std.testing.expectError( error.BucketNotFound, - listObjects(test_client, "nonexistent-bucket", .{}), + bucket_ops.listObjects(test_client, "nonexistent-bucket", .{}), ); // Test invalid max_keys try std.testing.expectError( error.InvalidResponse, - listObjects(test_client, "test-bucket", .{ + bucket_ops.listObjects(test_client, "test-bucket", .{ .max_keys = 1001, // Max allowed is 1000 }), ); @@ -573,9 +505,9 @@ test "object operations" { // Initialize test client with dummy credentials const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -585,6 +517,9 @@ test "object operations" { const test_data = "Hello, S3!"; try putObject(test_client, "test-bucket", "test-key", test_data); + var metadata = try headObject(test_client, "test-bucket", "test-key"); + defer metadata.deinit(allocator); + const retrieved = try getObject(test_client, "test-bucket", "test-key"); defer allocator.free(retrieved); try std.testing.expectEqualStrings(test_data, retrieved); @@ -596,9 +531,9 @@ test "object operations error handling" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -622,9 +557,9 @@ test "object operations with large data" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + 
.access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -654,9 +589,8 @@ test "object operations with custom endpoint" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", .endpoint = "http://localhost:9000", }; @@ -679,9 +613,9 @@ test "object key validation" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -727,9 +661,9 @@ test "list objects empty bucket" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -741,7 +675,7 @@ test "list objects empty bucket" { defer _ = bucket_ops.deleteBucket(test_client, bucket_name) catch {}; // List objects in empty bucket - const objects = try listObjects(test_client, bucket_name, .{}); + const objects = try bucket_ops.listObjects(test_client, bucket_name, .{}); defer allocator.free(objects); try std.testing.expectEqual(@as(usize, 0), objects.len); @@ -751,9 +685,9 @@ test "list objects with multiple prefixes" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + 
.endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -797,7 +731,7 @@ test "list objects with multiple prefixes" { }; for (test_cases) |case| { - const objects = try listObjects(test_client, bucket_name, .{ + const objects = try bucket_ops.listObjects(test_client, bucket_name, .{ .prefix = case.prefix, }); defer { @@ -824,9 +758,9 @@ test "list objects pagination with various sizes" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -858,17 +792,17 @@ test "list objects pagination with various sizes" { // Test different page sizes const page_sizes = [_]u32{ 5, 10, 15 }; for (page_sizes) |page_size| { - var collected_objects = std.ArrayList([]const u8).init(allocator); + var collected_objects: std.ArrayList([]const u8) = .empty; defer { for (collected_objects.items) |key| { allocator.free(key); } - collected_objects.deinit(); + collected_objects.deinit(allocator); } var last_key: ?[]const u8 = null; while (true) { - const page = try listObjects(test_client, bucket_name, .{ + const page = try bucket_ops.listObjects(test_client, bucket_name, .{ .max_keys = page_size, .start_after = last_key, }); @@ -886,7 +820,7 @@ test "list objects pagination with various sizes" { for (page) |object| { if (last_key == null or !std.mem.eql(u8, object.key, last_key.?)) { - try collected_objects.append(try allocator.dupe(u8, object.key)); + try collected_objects.append(allocator, try allocator.dupe(u8, object.key)); } } @@ -916,9 +850,9 @@ test "list objects with special characters in prefix" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = 
"us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -952,7 +886,7 @@ test "list objects with special characters in prefix" { // Test listing with various special character prefixes for (test_objects) |obj| { const prefix = obj.key[0 .. std.mem.indexOf(u8, obj.key, "/").? + 1]; - const objects = try listObjects(test_client, bucket_name, .{ + const objects = try bucket_ops.listObjects(test_client, bucket_name, .{ .prefix = prefix, }); defer { @@ -973,9 +907,9 @@ test "ObjectUploader basic functionality" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -998,6 +932,11 @@ test "ObjectUploader basic functionality" { try std.testing.expectEqualStrings(test_string, retrieved_string); // Test JSON upload + const TestJson = struct { + name: []const u8, + value: i32, + tags: [][]const u8, + }; const test_json = .{ .name = "test", .value = 42, @@ -1011,7 +950,7 @@ test "ObjectUploader basic functionality" { // Parse and verify JSON content const parsed = try std.json.parseFromSlice( - @TypeOf(test_json), + TestJson, allocator, retrieved_json, .{}, @@ -1033,9 +972,9 @@ test "ObjectUploader file operations" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -1053,7 +992,7 @@ test "ObjectUploader file operations" { const test_filename = "test-upload.txt"; // Create 
temporary directory for test files - try std.fs.cwd().makeDir("tmp") catch |err| switch (err) { + std.fs.cwd().makeDir("tmp") catch |err| switch (err) { error.PathAlreadyExists => {}, else => return err, }; @@ -1085,9 +1024,9 @@ test "ObjectUploader error cases" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", + .endpoint = "http://localhost:9000", }; var test_client = try S3Client.init(allocator, config); @@ -1112,24 +1051,14 @@ test "ObjectUploader error cases" { error.FileNotFound, uploader.uploadFile("test-bucket", "test.txt", "nonexistent/file.txt"), ); - - // Test invalid JSON - const invalid_json = .{ - .recursive = @as(*anyopaque, undefined), // Can't serialize pointer types to JSON - }; - try std.testing.expectError( - error.InvalidValue, - uploader.uploadJson("test-bucket", "test.json", invalid_json), - ); } test "ObjectUploader with custom endpoint" { const allocator = std.testing.allocator; const config = client_impl.S3Config{ - .access_key_id = "test-key", - .secret_access_key = "test-secret", - .region = "us-east-1", + .access_key_id = "minioadmin", + .secret_access_key = "minioadmin", .endpoint = "http://localhost:9000", }; diff --git a/src/s3/post_policy.zig b/src/s3/post_policy.zig new file mode 100644 index 0000000..61d476f --- /dev/null +++ b/src/s3/post_policy.zig @@ -0,0 +1,361 @@ +const std = @import("std"); +const Allocator = std.mem.Allocator; +const ArenaAllocator = std.heap.ArenaAllocator; + +const signer = @import("client/auth/signer.zig"); +const UtcDateTime = @import("client/auth/time.zig").UtcDateTime; +const S3Config = @import("client/implementation.zig").S3Config; + +const Self = @This(); + +pub const ConditionMatch = union(enum) { + /// The form field value must match the value specified. 
+ exact: []const u8, + /// The value must start with the specified value. + @"starts-with": []const u8, + /// For form fields that accept an upper and lower limit range (in bytes). + @"content-length-range": struct { min: u64, max: u64 }, + + fn jsonWrite(self: *const ConditionMatch, jws: anytype, name: []const u8) !void { + switch (self.*) { + .exact => |e| { + try jws.beginObject(); + try jws.objectField(name); + try jws.write(e); + try jws.endObject(); + }, + .@"starts-with" => |sw| { + try jws.beginArray(); + try jws.write("starts-with"); + try jws.print("\"${s}\"", .{name}); + try jws.write(sw); + try jws.endArray(); + }, + .@"content-length-range" => |r| { + try jws.beginArray(); + try jws.write("content-length-range"); + try jws.write(r.min); + try jws.write(r.max); + try jws.endArray(); + }, + } + } +}; + +pub const ConditionVariable = union(enum) { + /// Specifies the ACL value that must be used in the form submission. + /// This condition supports `exact` matching and `starts-with` condition match type. + acl, + /// Specifies the acceptable bucket name. + /// This condition supports `exact` matching condition match type. + bucket, + /// The minimum and maximum allowable size for the uploaded content. + /// This condition supports `content-length-range` condition match type. + @"content-length-range", + /// REST-specific headers. + /// This condition supports `exact` matching and `starts-with` condition match type. + @"Cache-Control", + /// REST-specific headers. + /// This condition supports `exact` matching and `starts-with` condition match type. + @"Content-Type", + /// REST-specific headers. + /// This condition supports `exact` matching and `starts-with` condition match type. + @"Content-Disposition", + /// REST-specific headers. + /// This condition supports `exact` matching and `starts-with` condition match type. + @"Content-Encoding", + /// REST-specific headers. + /// This condition supports `exact` matching and `starts-with` condition match type. 
    Expires,
    /// The acceptable key name or a prefix of the uploaded object.
    /// This condition supports `exact` matching and `starts-with` condition match type.
    key,
    /// The URL to which the client is redirected upon successful upload.
    /// This condition supports `exact` matching and `starts-with` condition match type.
    @"success-action-redirect",
    /// The URL to which the client is redirected upon successful upload.
    /// This condition supports `exact` matching and `starts-with` condition match type.
    redirect,

    /// The signing algorithm that must be used during signature calculation. For AWS Signature Version 4, the value is AWS4-HMAC-SHA256.
    /// This condition supports `exact` matching.
    @"x-amz-algorithm",
    /// The credentials that you used to calculate the signature.
    @"x-amz-credential",
    /// The date value specified in the ISO8601 formatted string. For example, 20130728T000000Z.
    /// The date must be same that you used in creating the signing key for signature calculation.
    /// This condition supports `exact` matching.
    @"x-amz-date",
    /// Amazon DevPay security token.
    @"x-amz-security-token",

    /// User-specified metadata. The payload is used verbatim as the form-field
    /// name when serializing (see Condition.jsonStringify / formWrite).
    /// This condition supports `exact` matching and `starts-with` condition match type.
    meta: []const u8,
    /// The storage class to use for storing the object.
    /// This condition supports `exact` matching.
    @"x-amz-storage-class",
    /// If the bucket is configured as a website, this field redirects requests for this object to another object in the same bucket or to an external URL.
    /// This condition supports `exact` matching.
    @"x-amz-website-redirect-location",
    /// Indicates the algorithm used to create the checksum for the object.
    /// This condition supports `exact` matching.
    @"x-amz-checksum-algorithm": ChecksumAlgorithm,

    /// Compares two variables. For `meta`, the payload (field name) must also
    /// match; tag equality alone is not sufficient.
    fn equals(self: ConditionVariable, other: ConditionVariable) bool {
        return switch (self) {
            .meta => other == .meta and std.mem.eql(u8, self.meta, other.meta),
            else => std.meta.eql(self, other),
        };
    }
};

/// Checksum algorithms accepted for the `x-amz-checksum-algorithm` condition.
pub const ChecksumAlgorithm = enum {
    /// Specifies the base64-encoded, 32-bit CRC32 checksum of the object.
    CRC32,
    /// Specifies the base64-encoded, 32-bit CRC32C checksum of the object.
    CRC32C,
    /// Specifies the base64-encoded, 160-bit SHA-1 digest of the object.
    SHA1,
    /// Specifies the base64-encoded, 256-bit SHA-256 digest of the object.
    SHA256,

    /// Form-field name that carries the checksum value for this algorithm.
    fn name(self: ChecksumAlgorithm) []const u8 {
        return switch (self) {
            .CRC32 => "x-amz-checksum-crc32",
            .CRC32C => "x-amz-checksum-crc32c",
            .SHA1 => "x-amz-checksum-sha1",
            .SHA256 => "x-amz-checksum-sha256",
        };
    }
};

/// A single POST policy condition: which variable is constrained and how.
pub const Condition = struct {
    variable: ConditionVariable,
    match: ConditionMatch,

    /// Custom std.json hook: emits this condition as element(s) of the
    /// policy's "conditions" array.
    pub fn jsonStringify(self: *const Condition, jws: anytype) !void {
        switch (self.variable) {
            // `meta` carries the form-field name as its payload.
            .meta => |meta| try self.match.jsonWrite(jws, meta),
            .@"x-amz-checksum-algorithm" => |algo| {
                // Emits two entries: the algorithm selector itself (exact
                // match on the tag name) and the algorithm-specific checksum
                // field constrained by `self.match`.
                const algoMatch: ConditionMatch = .{ .exact = @tagName(algo) };
                try algoMatch.jsonWrite(jws, "x-amz-checksum-algorithm");
                try self.match.jsonWrite(jws, algo.name());
            },
            else => try self.match.jsonWrite(jws, @tagName(self.variable)),
        }
    }

    /// Mirrors the condition into the POST form data. Only `exact` and
    /// `starts-with` values translate to form fields; range conditions are
    /// intentionally skipped.
    pub fn formWrite(self: *const Condition, alloc: Allocator, form_data: *FormData) !void {
        const val: []const u8 = switch (self.match) {
            .exact => |e| e,
            .@"starts-with" => |sw| sw,
            else => return,
        };

        switch (self.variable) {
            .meta => |meta| try form_data.put(alloc, meta, val),
            .@"x-amz-checksum-algorithm" => |algo| {
                try form_data.put(alloc, @tagName(self.variable), @tagName(algo));
                try form_data.put(alloc, algo.name(), val);
            },
            else => try form_data.put(alloc, @tagName(self.variable), val),
        }
    }
};

/// Ordered string map backing the generated POST form fields.
const FormData =
std.StringArrayHashMapUnmanaged([]const u8);

// Allocator used for the conditions list and form-data map.
_alloc: Allocator,

/// Unix timestamp (in seconds)
expiration: i64,

/// List of conditions in the policy
conditions: std.ArrayList(Condition) = .empty,

/// POST form data
form_data: FormData = .empty,

/// Create a new POST Policy that expires at the Unix timestamp (in seconds).
pub fn expires_at(alloc: Allocator, unix_timestamp_secs: i64) Self {
    return .{
        ._alloc = alloc,
        .expiration = unix_timestamp_secs,
    };
}

/// Create a POST Policy that expires in a certain number of seconds from now.
pub fn expires_in(alloc: Allocator, seconds: u64) Self {
    return .expires_at(alloc, std.time.timestamp() + @as(i64, @intCast(seconds)));
}

/// Release the containers owned by the policy. Note: the condition and
/// form-data *strings* are not owned by the policy and are not freed here.
pub fn deinit(self: *Self) void {
    self.conditions.deinit(self._alloc);
    self.form_data.deinit(self._alloc);
}

/// Add custom condition to the policy. The condition is also mirrored into
/// `form_data` when it maps to a form field (see Condition.formWrite).
pub fn add(self: *Self, cond: Condition) !void {
    try cond.formWrite(self._alloc, &self.form_data);
    try self.conditions.append(self._alloc, cond);
}

/// Determine whether the policy includes the condition variable.
pub fn has(self: *const Self, cv: ConditionVariable) bool {
    for (self.conditions.items) |c| {
        if (c.variable.equals(cv)) {
            return true;
        }
    }
    return false;
}

/// Set bucket name
pub fn setBucket(self: *Self, bucket: []const u8) !void {
    return self.add(.{ .variable = .bucket, .match = .{ .exact = bucket } });
}

/// Set object name
pub fn setKey(self: *Self, key: []const u8) !void {
    return self.add(.{ .variable = .key, .match = .{ .exact = key } });
}

/// Set object name prefix
pub fn setKeyStartsWith(self: *Self, prefix: []const u8) !void {
    // BUG FIX: the ConditionMatch variant is `@"starts-with"` (hyphenated);
    // `.starts_with` is not a field of the union and did not compile.
    return self.add(.{ .variable = .key, .match = .{ .@"starts-with" = prefix } });
}

/// Set content type
pub fn setContentType(self: *Self, content_type: []const u8) !void {
    return self.add(.{ .variable = .@"Content-Type", .match = .{ .exact = content_type } });
}

/// Set content type prefix
pub fn setContentTypeStartsWith(self: *Self, prefix: []const u8) !void {
    // BUG FIX: same `@"starts-with"` field-name fix as setKeyStartsWith.
    return self.add(.{ .variable = .@"Content-Type", .match = .{ .@"starts-with" = prefix } });
}

/// Set content disposition
pub fn setContentDisposition(self: *Self, disposition: []const u8) !void {
    return self.add(.{ .variable = .@"Content-Disposition", .match = .{ .exact = disposition } });
}

/// Set content length range (inclusive bounds, in bytes).
pub fn setContentLengthRange(self: *Self, min: u64, max: u64) !void {
    return self.add(.{ .variable = .@"content-length-range", .match = .{ .@"content-length-range" = .{ .min = min, .max = max } } });
}

/// Custom std.json hook: serializes the whole policy document as
/// `{"expiration": <UtcDateTime>, "conditions": [...]}`.
pub fn jsonStringify(self: *const Self, jws: anytype) !void {
    try jws.beginObject();
    try jws.objectField("expiration");
    try jws.write(UtcDateTime.init(self.expiration));
    try jws.objectField("conditions");
    try jws.write(self.conditions.items);
    try jws.endObject();
}

pub const PresignOptions = struct {
    /// Whether to dupe all of the form data to extend its lifetime.
    dupe: bool = false,
};

/// Result of presigning: the URL to POST to plus the form fields the client
/// must submit. All returned slices are owned by the internal arena.
pub const Presigned = struct {
    _arena: ArenaAllocator,

    post_url: []const u8,
    form_data: FormData,

    pub fn deinit(self: *Presigned) void {
        self._arena.deinit();
    }
};

/// Presigns the POST Policy: fills in any missing SigV4 conditions
/// (x-amz-date / x-amz-algorithm / x-amz-credential), base64-encodes the
/// policy JSON, signs it, and returns the POST URL plus form data.
/// NOTE(review): the auto-added conditions store strings allocated in the
/// returned arena, so `self` must not be used after `Presigned.deinit()` —
/// confirm this lifetime contract with callers.
pub fn presign(self: *Self, config: *const S3Config, opts: PresignOptions) !Presigned {
    var arena: ArenaAllocator = .init(self._alloc);
    errdefer arena.deinit();
    const alloc: Allocator = arena.allocator();

    const dt = UtcDateTime.now();
    const date_str = try dt.formatAmzDate(alloc);
    defer alloc.free(date_str);

    // Add the required SigV4 conditions unless the caller already set them.
    if (!self.has(.@"x-amz-date")) {
        try self.add(.{ .variable = .@"x-amz-date", .match = .{ .exact = try dt.formatAmz(alloc) } });
    }
    if (!self.has(.@"x-amz-algorithm")) {
        try self.add(.{ .variable = .@"x-amz-algorithm", .match = .{ .exact = "AWS4-HMAC-SHA256" } });
    }
    if (!self.has(.@"x-amz-credential")) {
        const cred: []const u8 = try std.fmt.allocPrint(
            alloc,
            "{s}/{s}/{s}/s3/aws4_request",
            .{ config.access_key_id, date_str, config.region },
        );
        try self.add(.{ .variable = .@"x-amz-credential", .match = .{ .exact = cred } });
    }

    // The signed payload is the base64 of the policy JSON document.
    const policy: []const u8 = base64: {
        const policy_json = try std.json.Stringify.valueAlloc(alloc, self, .{});
        defer alloc.free(policy_json);
        var aw: std.io.Writer.Allocating = .init(alloc);
        defer aw.deinit();
        try std.base64.standard.Encoder.encodeWriter(&aw.writer, policy_json);
        break :base64 try aw.toOwnedSlice();
    };

    // Calculate signature
    const signature: []const u8 = sig: {
        const signing_key = try signer.deriveSigningKey(
            alloc,
            config.secret_access_key,
            date_str,
            config.region,
            "s3",
        );
        defer alloc.free(signing_key);

        break :sig try signer.calculateSignature(alloc, signing_key, policy);
    };

    // Copy the policy's form data
    var form_data: FormData = try .clone(self.form_data, alloc);
    errdefer form_data.deinit(alloc);

    if (opts.dupe) {
        // Clone all of the key-value pairs in the form data so the result
        // no longer aliases caller-owned strings.
        var it = form_data.iterator();
        while (it.next()) |e| {
            e.key_ptr.* = try alloc.dupe(u8, e.key_ptr.*);
            e.value_ptr.* = try alloc.dupe(u8, e.value_ptr.*);
        }
    }

    // Add final entries into form data
    try form_data.put(alloc, "policy", policy);
    try form_data.put(alloc, "x-amz-signature", signature);

    const endpoint = if (config.endpoint) |ep| ep else try std.fmt.allocPrint(alloc, "https://s3.{s}.amazonaws.com", .{config.region});
    defer if (config.endpoint == null) alloc.free(endpoint);
    if (endpoint.len == 0) {
        return error.EmptyEndpoint;
    }

    // The POST URL is endpoint[/bucket]; the object key is NOT part of the
    // URL — it travels in the form data.
    var post_url_writer: std.io.Writer.Allocating = .init(alloc);
    defer post_url_writer.deinit();
    try post_url_writer.writer.writeAll(endpoint);
    if (form_data.get("bucket")) |bucket_name| {
        if (endpoint[endpoint.len - 1] != '/') {
            _ = try post_url_writer.writer.write("/");
        }
        _ = try post_url_writer.writer.write(bucket_name);
    }
    const post_url: []const u8 = try post_url_writer.toOwnedSlice();

    return .{
        ._arena = arena,
        .post_url = post_url,
        .form_data = form_data,
    };
}
diff --git a/tests/integration/s3_client_test.zig b/tests/integration/s3_client_test.zig
index 40694ae..49e87e0 100644
--- a/tests/integration/s3_client_test.zig
+++ b/tests/integration/s3_client_test.zig
@@ -32,7 +32,6 @@ fn loadEnvVars() !s3.S3Config {
     return s3.S3Config{
         .access_key_id = access_key.?,
         .secret_access_key = secret_key.?,
-        .region = "us-west-1",
         .endpoint = endpoint.?,
     };
 }
@@ -141,6 +140,40 @@ test "create simple bucket" {
+test "post policy" {
+    std.debug.print("\n=== Starting Presigned POST Policy test ===\n", .{});
+
+    const config: s3.S3Config = .{
+        .access_key_id = "minioadmin",
+        .secret_access_key = "minioadmin",
+        .endpoint = "http://localhost:9000",
+    };
+    const bucket_name = "integration-test-bucket-123";
+
+    var policy = s3.PostPolicy.expires_in(allocator, 60);
+    defer policy.deinit();
+    try policy.setBucket(bucket_name);
+ try policy.setKey("my-key"); + try policy.setContentType("image/jpeg"); + try policy.setContentLengthRange(1, 8 * 1024 * 1024); // 8MB max + try policy.add(.{ .variable = .{ .@"x-amz-checksum-algorithm" = .SHA256 }, .match = .{ .exact = "base64 hash" } }); + + var presigned = try policy.presign(&config, .{}); + defer presigned.deinit(); + + const policy_base64 = presigned.form_data.get("policy").?; + const buffer_size = try std.base64.standard.Decoder.calcSizeForSlice(policy_base64); + const decoded_policy = try testing.allocator.alloc(u8, buffer_size); + defer testing.allocator.free(decoded_policy); + try std.base64.standard.Decoder.decode(decoded_policy, policy_base64); + + // Try to parse decoded policy as JSON + const result = try std.json.parseFromSlice(std.json.Value, testing.allocator, decoded_policy, .{}); + defer result.deinit(); + + try testing.expectEqualStrings(presigned.post_url, "http://localhost:9000/integration-test-bucket-123/my-key"); +} + // test "upload simple file to test-bucket" { // std.debug.print("\n=== Starting simple file upload test ===\n", .{});