diff --git a/.gitignore b/.gitignore index fa967abd7..8081bd0ff 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ *~ *.test validator +golangci-lint \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 000000000..7d1dd3352 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,16 @@ +linters-settings: + misspell: + locale: US + +linters: + disable-all: true + enable: + - typecheck + - goimports + - misspell + - govet + - golint + - ineffassign + - gosimple + - deadcode + - structcheck diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 7ed7df14e..000000000 --- a/.travis.yml +++ /dev/null @@ -1,28 +0,0 @@ -sudo: false -language: go - -os: -- linux - -env: -- ARCH=x86_64 -- ARCH=i686 - -go: -- 1.11.x -- tip - -matrix: - fast_finish: true - allow_failures: - - go: tip - -addons: - apt: - packages: - - devscripts - -script: -- diff -au <(gofmt -d .) <(printf "") -- diff -au <(licensecheck --check '.go$' --recursive --lines 0 * | grep -v -w 'Apache (v2.0)') <(printf "") -- make diff --git a/CNAME b/CNAME new file mode 100644 index 000000000..d365a7bb2 --- /dev/null +++ b/CNAME @@ -0,0 +1 @@ +minio-go.min.io \ No newline at end of file diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 17973078e..f640dfb9f 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -5,7 +5,7 @@ Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522) ### Making new releases -Tag and sign your release commit, additionally this step requires you to have access to Minio's trusted private key. +Tag and sign your release commit, additionally this step requires you to have access to MinIO's trusted private key. 
```sh $ export GNUPGHOME=/media/${USER}/minio/trusted $ git tag -s 4.0.0 @@ -23,11 +23,11 @@ $ grep libraryVersion api.go Commit your changes ``` -$ git commit -a -m "Update version for next release" --author "Minio Trusted " +$ git commit -a -m "Update version for next release" --author "MinIO Trusted " ``` ### Announce -Announce new release by adding release notes at https://github.com/minio/minio-go/releases from `trusted@minio.io` account. Release notes requires two sections `highlights` and `changelog`. Highlights is a bulleted list of salient features in this release and Changelog contains list of all commits since the last release. +Announce new release by adding release notes at https://github.com/minio/minio-go/releases from `trusted@min.io` account. Release notes requires two sections `highlights` and `changelog`. Highlights is a bulleted list of salient features in this release and Changelog contains list of all commits since the last release. To generate `changelog` ```sh diff --git a/Makefile b/Makefile index bad81ffaf..d9b433443 100644 --- a/Makefile +++ b/Makefile @@ -1,15 +1,31 @@ +GOPATH := $(shell go env GOPATH) + all: checks -checks: - @go get -t ./... - @go vet ./... - @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./... - @go get github.com/dustin/go-humanize/... - @go get github.com/sirupsen/logrus/... 
- @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go +.PHONY: examples docs + +checks: lint vet test examples functional-test + +lint: + @mkdir -p ${GOPATH}/bin + @which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0) + @echo "Running $@ check" + @GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean + @GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml + +vet: + @GO111MODULE=on go vet ./... + +test: + @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./... + +examples: @mkdir -p /tmp/examples && for i in $(echo examples/s3/*); do go build -o /tmp/examples/$(basename ${i:0:-3}) ${i}; done - @go get -u github.com/a8m/mark/... - @go get -u github.com/minio/cli/... - @go get -u golang.org/x/tools/cmd/goimports - @go get -u github.com/gernest/wow/... - @go build docs/validator.go && ./validator -m docs/API.md -t docs/checker.go.tpl + +functional-test: + @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go + +clean: + @echo "Cleaning up all the generated files" + @find . -name '*.test' | xargs rm -fv + @find . -name '*~' | xargs rm -fv diff --git a/NOTICE b/NOTICE index c521791c5..1e8fd3b92 100644 --- a/NOTICE +++ b/NOTICE @@ -1,2 +1,9 @@ -minio-go -Copyright 2015-2017 Minio, Inc. \ No newline at end of file +MinIO Cloud Storage, (C) 2014-2020 MinIO, Inc. + +This product includes software developed at MinIO, Inc. +(https://min.io/). + +The MinIO project contains unmodified/modified subcomponents too with +separate copyright notices and license terms. 
Your use of the source +code for these subcomponents is subject to the terms and conditions +of Apache License Version 2.0 diff --git a/README.md b/README.md index ad9d5e60b..0c83e9efb 100644 --- a/README.md +++ b/README.md @@ -1,43 +1,46 @@ -# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE) +# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE) -The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage. +The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage. -This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference). +This quickstart guide will show you how to install the MinIO client SDK, connect to MinIO, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference). -This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang). 
+This document assumes that you have a working [Go development environment](https://golang.org/doc/install). ## Download from Github ```sh -go get -u github.com/minio/minio-go +GO111MODULE=on go get github.com/minio/minio-go/v7 ``` -## Initialize Minio Client -Minio client requires the following four parameters specified to connect to an Amazon S3 compatible object storage. +## Initialize MinIO Client +MinIO client requires the following four parameters specified to connect to an Amazon S3 compatible object storage. | Parameter | Description| | :--- | :--- | | endpoint | URL to object storage service. | -| accessKeyID | Access key is the user ID that uniquely identifies your account. | -| secretAccessKey | Secret key is the password to your account. | -| secure | Set this value to 'true' to enable secure (HTTPS) access. | +| _minio.Options_ | All the options such as credentials, custom transport etc. | ```go package main import ( - "github.com/minio/minio-go" "log" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { - endpoint := "play.minio.io:9000" + endpoint := "play.min.io" accessKeyID := "Q3AM3UQ867SPQQA43P2F" secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" useSSL := true // Initialize minio client object. - minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL) + minioClient, err := minio.New(endpoint, &minio.Options{ + Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), + Secure: useSSL, + }) if err != nil { log.Fatalln(err) } @@ -49,25 +52,31 @@ func main() { ## Quick Start Example - File Uploader This example program connects to an object storage server, creates a bucket and uploads a file to the bucket. -We will use the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public. 
+We will use the MinIO server running at [https://play.min.io](https://play.min.io) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public. ### FileUploader.go ```go package main import ( - "github.com/minio/minio-go" + "context" "log" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { - endpoint := "play.minio.io:9000" + endpoint := "play.min.io" accessKeyID := "Q3AM3UQ867SPQQA43P2F" secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" useSSL := true // Initialize minio client object. - minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL) + minioClient, err := minio.New(endpoint, &minio.Options{ + Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), + Secure: useSSL, + }) if err != nil { log.Fatalln(err) } @@ -76,11 +85,11 @@ func main() { bucketName := "mymusic" location := "us-east-1" - err = minioClient.MakeBucket(bucketName, location) + err = minioClient.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location}) if err != nil { // Check to see if we already own this bucket (which happens if you run this twice) - exists, err := minioClient.BucketExists(bucketName) - if err == nil && exists { + exists, errBucketExists := minioClient.BucketExists(bucketName) + if errBucketExists == nil && exists { log.Printf("We already own %s\n", bucketName) } else { log.Fatalln(err) @@ -95,7 +104,7 @@ func main() { contentType := "application/zip" // Upload the zip file with FPutObject - n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType:contentType}) + n, err := minioClient.FPutObject(context.Background(), bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType}) if err != nil { log.Fatalln(err) } @@ -117,58 +126,55 @@ mc ls play/mymusic/ ## API Reference The full API Reference is available 
here. -* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference) +* [Complete API Reference](https://docs.min.io/docs/golang-client-api-reference) ### API Reference : Bucket Operations -* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket) -* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets) -* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists) -* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket) -* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects) -* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2) -* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads) +* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket) +* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets) +* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists) +* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket) +* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects) +* [`ListObjectsV2`](https://docs.min.io/docs/golang-client-api-reference#ListObjectsV2) +* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads) ### API Reference : Bucket policy Operations -* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy) -* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy) +* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy) +* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy) ### API Reference : Bucket notification Operations -* 
[`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification) -* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification) -* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification) -* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension) +* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification) +* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification) +* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification) +* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO Extension) +* [`ListenNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenNotification) (MinIO Extension) ### API Reference : File Object Operations -* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject) -* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject) -* [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext) -* [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext) +* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) +* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FGetObject) ### API Reference : Object Operations -* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject) -* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject) -* [`GetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#GetObjectWithContext) -* 
[`PutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectWithContext) -* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming) -* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject) -* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject) -* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject) -* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects) -* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload) -* [`SelectObjectContent`](https://docs.minio.io/docs/golang-client-api-reference#SelectObjectContent) +* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject) +* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject) +* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming) +* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject) +* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject) +* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject) +* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects) +* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload) +* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent) ### API Reference : Presigned Operations -* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject) -* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject) -* [`PresignedHeadObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedHeadObject) -* 
[`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy) +* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject) +* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject) +* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject) +* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy) ### API Reference : Client custom settings -* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo) -* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport) -* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn) -* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff) +* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo) +* [`SetCustomTransport`](http://docs.min.io/docs/golang-client-api-reference#SetCustomTransport) +* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn) +* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff) ## Full Examples @@ -190,11 +196,22 @@ The full API Reference is available here. 
* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go) * [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go) +### Full Examples : Bucket encryption Operations +* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go) +* [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go) +* [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go) + +### Full Examples : Bucket replication Operations +* [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go) +* [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go) +* [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go) + ### Full Examples : Bucket notification Operations * [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) * [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) * [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go) -* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension) +* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension) +* [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO Extension) ### Full Examples : File Object Operations * [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) @@ -225,15 +242,11 @@ The full API Reference is 
available here. * [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) ## Explore Further -* [Complete Documentation](https://docs.minio.io) -* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference) -* [Go Music Player App Full Application Example](https://docs.minio.io/docs/go-music-player-app) +* [Complete Documentation](https://docs.min.io) +* [MinIO Go Client SDK API Reference](https://docs.min.io/docs/golang-client-api-reference) ## Contribute [Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md) -[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go) -[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go) - ## License This SDK is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](./LICENSE) and [NOTICE](./NOTICE) for more information. 
diff --git a/README_zh_CN.md b/README_zh_CN.md index a5acf199e..0911b0905 100644 --- a/README_zh_CN.md +++ b/README_zh_CN.md @@ -1,12 +1,12 @@ -# 适用于与Amazon S3兼容云存储的Minio Go SDK [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) +# 适用于与Amazon S3兼容云存储的MinIO Go SDK [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) -Minio Go Client SDK提供了简单的API来访问任何与Amazon S3兼容的对象存储服务。 +MinIO Go Client SDK提供了简单的API来访问任何与Amazon S3兼容的对象存储服务。 -**支持的云存储:** +**支持的云存储:** - AWS Signature Version 4 - Amazon S3 - - Minio + - MinIO - AWS Signature Version 2 - Google Cloud Storage (兼容模式) @@ -14,22 +14,22 @@ Minio Go Client SDK提供了简单的API来访问任何与Amazon S3兼容的对 - Ceph Object Gateway - Riak CS -本文我们将学习如何安装Minio client SDK,连接到Minio,并提供一下文件上传的示例。对于完整的API以及示例,请参考[Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference)。 +本文我们将学习如何安装MinIO client SDK,连接到MinIO,并提供一下文件上传的示例。对于完整的API以及示例,请参考[Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference)。 -本文假设你已经有 [Go开发环境](https://docs.minio.io/docs/how-to-install-golang)。 +本文假设你已经有 [Go开发环境](https://golang.org/doc/install)。 ## 从Github下载 ```sh go get -u github.com/minio/minio-go ``` -## 初始化Minio Client -Minio client需要以下4个参数来连接与Amazon S3兼容的对象存储。 +## 初始化MinIO Client +MinIO client需要以下4个参数来连接与Amazon S3兼容的对象存储。 -| 参数 | 描述| +| 参数 | 描述| | :--- | :--- | -| endpoint | 对象存储服务的URL | -| accessKeyID | Access key是唯一标识你的账户的用户ID。 | +| endpoint | 对象存储服务的URL | +| accessKeyID | Access key是唯一标识你的账户的用户ID。 | | secretAccessKey | Secret key是你账户的密码。 | | secure | true代表使用HTTPS | @@ -38,12 +38,12 @@ Minio client需要以下4个参数来连接与Amazon S3兼容的对象存储。 package main import ( - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" "log" ) func 
main() { - endpoint := "play.minio.io:9000" + endpoint := "play.min.io" accessKeyID := "Q3AM3UQ867SPQQA43P2F" secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" useSSL := true @@ -61,19 +61,19 @@ func main() { ## 示例-文件上传 本示例连接到一个对象存储服务,创建一个存储桶并上传一个文件到存储桶中。 -我们在本示例中使用运行在 [https://play.minio.io:9000](https://play.minio.io:9000) 上的Minio服务,你可以用这个服务来开发和测试。示例中的访问凭据是公开的。 +我们在本示例中使用运行在 [https://play.min.io](https://play.min.io) 上的MinIO服务,你可以用这个服务来开发和测试。示例中的访问凭据是公开的。 ### FileUploader.go ```go package main import ( - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" "log" ) func main() { - endpoint := "play.minio.io:9000" + endpoint := "play.min.io" accessKeyID := "Q3AM3UQ867SPQQA43P2F" secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" useSSL := true @@ -118,7 +118,7 @@ func main() { ### 运行FileUploader ```sh go run file-uploader.go -2016/08/13 17:03:28 Successfully created mymusic +2016/08/13 17:03:28 Successfully created mymusic 2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413 mc ls play/mymusic/ @@ -127,60 +127,56 @@ mc ls play/mymusic/ ## API文档 完整的API文档在这里。 -* [完整API文档](https://docs.minio.io/docs/golang-client-api-reference) +* [完整API文档](https://docs.min.io/docs/golang-client-api-reference) ### API文档 : 操作存储桶 -* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket) -* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets) -* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists) -* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket) -* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects) -* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2) -* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads) +* 
[`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket) +* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets) +* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists) +* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket) +* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects) +* [`ListObjectsV2`](https://docs.min.io/docs/golang-client-api-reference#ListObjectsV2) +* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads) ### API文档 : 存储桶策略 -* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy) -* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy) +* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy) +* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy) ### API文档 : 存储桶通知 -* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification) -* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification) -* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification) -* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension) +* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification) +* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification) +* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification) +* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO Extension) ### API文档 : 操作文件对象 -* 
[`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject) -* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject) -* [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext) -* [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext) +* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) +* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) ### API文档 : 操作对象 -* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject) -* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject) -* [`GetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#GetObjectWithContext) -* [`PutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectWithContext) -* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming) -* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject) -* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject) -* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject) -* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects) -* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload) +* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject) +* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject) +* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming) +* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject) +* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject) +* 
[`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject) +* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects) +* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload) ### API文档: 操作加密对象 -* [`GetEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#GetEncryptedObject) -* [`PutEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#PutEncryptedObject) +* [`GetEncryptedObject`](https://docs.min.io/docs/golang-client-api-reference#GetEncryptedObject) +* [`PutEncryptedObject`](https://docs.min.io/docs/golang-client-api-reference#PutEncryptedObject) ### API文档 : Presigned操作 -* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject) -* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject) -* [`PresignedHeadObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedHeadObject) -* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy) +* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject) +* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject) +* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject) +* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy) ### API文档 : 客户端自定义设置 -* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo) -* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport) -* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn) -* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff) +* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo) +* 
[`SetCustomTransport`](http://docs.min.io/docs/golang-client-api-reference#SetCustomTransport) +* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn) +* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff) ## 完整示例 @@ -197,12 +193,12 @@ mc ls play/mymusic/ * [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go) * [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go) * [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go) - + ### 完整示例 : 存储桶通知 * [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) * [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) * [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go) -* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio扩展) +* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO扩展) ### 完整示例 : 操作文件对象 * [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) @@ -233,13 +229,8 @@ mc ls play/mymusic/ * [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) ## 了解更多 -* [完整文档](https://docs.minio.io) -* [Minio Go Client SDK API文档](https://docs.minio.io/docs/golang-client-api-reference) -* [Go 音乐播放器完整示例](https://docs.minio.io/docs/go-music-player-app) +* [完整文档](https://docs.min.io) +* [MinIO Go Client SDK API文档](https://docs.min.io/docs/golang-client-api-reference) ## 贡献 [贡献指南](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md) - -[![Build 
Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go) -[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go) - diff --git a/api-bucket-encryption.go b/api-bucket-encryption.go new file mode 100644 index 000000000..e02ab84af --- /dev/null +++ b/api-bucket-encryption.go @@ -0,0 +1,134 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/sse" +) + +// SetBucketEncryption sets the default encryption configuration on an existing bucket. +func (c Client) SetBucketEncryption(ctx context.Context, bucketName string, config *sse.Configuration) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + if config == nil { + return errInvalidArgument("configuration cannot be empty") + } + + buf, err := xml.Marshal(config) + if err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. 
+ urlValues := make(url.Values) + urlValues.Set("encryption", "") + + // Content-length is mandatory to set a default encryption configuration + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(buf), + contentLength: int64(len(buf)), + contentMD5Base64: sumMD5Base64(buf), + } + + // Execute PUT to upload a new bucket default encryption configuration. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} + +// RemoveBucketEncryption removes the default encryption configuration on a bucket with a context to control cancellations and timeouts. +func (c Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("encryption", "") + + // DELETE default encryption configuration on a bucket. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} + +// GetBucketEncryption gets the default encryption configuration +// on an existing bucket with a context to control cancellations and timeouts. +func (c Client) GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) { + // Input validation. 
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("encryption", "") + + // Execute GET on bucket to get the default encryption configuration. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, "") + } + + encryptionConfig := &sse.Configuration{} + if err = xmlDecoder(resp.Body, encryptionConfig); err != nil { + return nil, err + } + + return encryptionConfig, nil +} diff --git a/api-bucket-lifecycle.go b/api-bucket-lifecycle.go new file mode 100644 index 000000000..e1fac813c --- /dev/null +++ b/api-bucket-lifecycle.go @@ -0,0 +1,147 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "io/ioutil" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/lifecycle" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketLifecycle set the lifecycle on an existing bucket. 
+func (c Client) SetBucketLifecycle(ctx context.Context, bucketName string, config *lifecycle.Configuration) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // If lifecycle is empty then delete it. + if config.Empty() { + return c.removeBucketLifecycle(ctx, bucketName) + } + + buf, err := xml.Marshal(config) + if err != nil { + return err + } + + // Save the updated lifecycle. + return c.putBucketLifecycle(ctx, bucketName, buf) +} + +// Saves a new bucket lifecycle. +func (c Client) putBucketLifecycle(ctx context.Context, bucketName string, buf []byte) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + + // Content-length is mandatory for put lifecycle request + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(buf), + contentLength: int64(len(buf)), + contentMD5Base64: sumMD5Base64(buf), + } + + // Execute PUT to upload a new bucket lifecycle. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// Remove lifecycle from a bucket. +func (c Client) removeBucketLifecycle(ctx context.Context, bucketName string) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + + // Execute DELETE on objectName. 
+ resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + return nil +} + +// GetBucketLifecycle fetch bucket lifecycle configuration +func (c Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + + bucketLifecycle, err := c.getBucketLifecycle(ctx, bucketName) + if err != nil { + return nil, err + } + + config := lifecycle.NewConfiguration() + if err = xml.Unmarshal(bucketLifecycle, config); err != nil { + return nil, err + } + return config, nil +} + +// Request server for current bucket lifecycle. +func (c Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + + // Execute GET on bucket to get lifecycle. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, "") + } + } + + return ioutil.ReadAll(resp.Body) +} diff --git a/api-bucket-notification.go b/api-bucket-notification.go new file mode 100644 index 000000000..76787ecab --- /dev/null +++ b/api-bucket-notification.go @@ -0,0 +1,255 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bufio" + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + "time" + + jsoniter "github.com/json-iterator/go" + "github.com/minio/minio-go/v7/pkg/notification" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketNotification saves a new bucket notification with a context to control cancellations and timeouts. +func (c Client) SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("notification", "") + + notifBytes, err := xml.Marshal(&config) + if err != nil { + return err + } + + notifBuffer := bytes.NewReader(notifBytes) + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: notifBuffer, + contentLength: int64(len(notifBytes)), + contentMD5Base64: sumMD5Base64(notifBytes), + contentSHA256Hex: sum256Hex(notifBytes), + } + + // Execute PUT to upload a new bucket notification. 
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// RemoveAllBucketNotification - Remove bucket notification clears all previously specified config +func (c Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error { + return c.SetBucketNotification(ctx, bucketName, notification.Configuration{}) +} + +// GetBucketNotification returns current bucket notification configuration +func (c Client) GetBucketNotification(ctx context.Context, bucketName string) (bucketNotification notification.Configuration, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return notification.Configuration{}, err + } + return c.getBucketNotification(ctx, bucketName) +} + +// Request server for notification rules. +func (c Client) getBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) { + urlValues := make(url.Values) + urlValues.Set("notification", "") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + + defer closeResponse(resp) + if err != nil { + return notification.Configuration{}, err + } + return processBucketNotificationResponse(bucketName, resp) + +} + +// processes the GetNotification http response from the server. 
+func processBucketNotificationResponse(bucketName string, resp *http.Response) (notification.Configuration, error) { + if resp.StatusCode != http.StatusOK { + errResponse := httpRespToErrorResponse(resp, bucketName, "") + return notification.Configuration{}, errResponse + } + var bucketNotification notification.Configuration + err := xmlDecoder(resp.Body, &bucketNotification) + if err != nil { + return notification.Configuration{}, err + } + return bucketNotification, nil +} + +// ListenNotification listen for all events, this is a MinIO specific API +func (c Client) ListenNotification(ctx context.Context, prefix, suffix string, events []string) <-chan notification.Info { + return c.ListenBucketNotification(ctx, "", prefix, suffix, events) +} + +// ListenBucketNotification listen for bucket events, this is a MinIO specific API +func (c Client) ListenBucketNotification(ctx context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info { + notificationInfoCh := make(chan notification.Info, 1) + const notificationCapacity = 4 * 1024 * 1024 + notificationEventBuffer := make([]byte, notificationCapacity) + // Only success, start a routine to start reading line by line. + go func(notificationInfoCh chan<- notification.Info) { + defer close(notificationInfoCh) + + // Validate the bucket name. + if bucketName != "" { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + select { + case notificationInfoCh <- notification.Info{ + Err: err, + }: + case <-ctx.Done(): + } + return + } + } + + // Check ARN partition to verify if listening bucket is supported + if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) { + select { + case notificationInfoCh <- notification.Info{ + Err: errAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"), + }: + case <-ctx.Done(): + } + return + } + + // Continuously run and listen on bucket notification. 
+ // Create a done channel to control 'ListObjects' go routine. + retryDoneCh := make(chan struct{}, 1) + + // Indicate to our routine to exit cleanly upon return. + defer close(retryDoneCh) + + // Prepare urlValues to pass into the request on every loop + urlValues := make(url.Values) + urlValues.Set("prefix", prefix) + urlValues.Set("suffix", suffix) + urlValues["events"] = events + + // Wait on the jitter retry loop. + for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) { + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + if err != nil { + select { + case notificationInfoCh <- notification.Info{ + Err: err, + }: + case <-ctx.Done(): + } + return + } + + // Validate http response, upon error return quickly. + if resp.StatusCode != http.StatusOK { + errResponse := httpRespToErrorResponse(resp, bucketName, "") + select { + case notificationInfoCh <- notification.Info{ + Err: errResponse, + }: + case <-ctx.Done(): + } + return + } + + // Initialize a new bufio scanner, to read line by line. + bio := bufio.NewScanner(resp.Body) + + // Use a higher buffer to support unexpected + // caching done by proxies + bio.Buffer(notificationEventBuffer, notificationCapacity) + var json = jsoniter.ConfigCompatibleWithStandardLibrary + + // Unmarshal each line, returns marshaled values. + for bio.Scan() { + var notificationInfo notification.Info + if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil { + // Unexpected error during json unmarshal, send + // the error to caller for actionable as needed.
+ select { + case notificationInfoCh <- notification.Info{ + Err: err, + }: + case <-ctx.Done(): + return + } + closeResponse(resp) + continue + } + // Send notificationInfo + select { + case notificationInfoCh <- notificationInfo: + case <-ctx.Done(): + closeResponse(resp) + return + } + } + + if err = bio.Err(); err != nil { + select { + case notificationInfoCh <- notification.Info{ + Err: err, + }: + case <-ctx.Done(): + return + } + } + + // Close current connection before looping further. + closeResponse(resp) + + } + }(notificationInfoCh) + + // Returns the notification info channel, for caller to start reading from. + return notificationInfoCh +} diff --git a/api-bucket-policy.go b/api-bucket-policy.go new file mode 100644 index 000000000..72676f344 --- /dev/null +++ b/api-bucket-policy.go @@ -0,0 +1,142 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketPolicy sets the access permissions on an existing bucket. +func (c Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // If policy is empty then delete the bucket policy. 
+ if policy == "" { + return c.removeBucketPolicy(ctx, bucketName) + } + + // Save the updated policies. + return c.putBucketPolicy(ctx, bucketName, policy) +} + +// Saves a new bucket policy. +func (c Client) putBucketPolicy(ctx context.Context, bucketName, policy string) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: strings.NewReader(policy), + contentLength: int64(len(policy)), + } + + // Execute PUT to upload a new bucket policy. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// Removes all policies on a bucket. +func (c Client) removeBucketPolicy(ctx context.Context, bucketName string) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") + + // Execute DELETE on objectName. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + return nil +} + +// GetBucketPolicy returns the current policy +func (c Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) { + // Input validation. 
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + bucketPolicy, err := c.getBucketPolicy(ctx, bucketName) + if err != nil { + errResponse := ToErrorResponse(err) + if errResponse.Code == "NoSuchBucketPolicy" { + return "", nil + } + return "", err + } + return bucketPolicy, nil +} + +// Request server for current bucket policy. +func (c Client) getBucketPolicy(ctx context.Context, bucketName string) (string, error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + + defer closeResponse(resp) + if err != nil { + return "", err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return "", httpRespToErrorResponse(resp, bucketName, "") + } + } + + bucketPolicyBuf, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + + policy := string(bucketPolicyBuf) + return policy, err +} diff --git a/api-bucket-replication.go b/api-bucket-replication.go new file mode 100644 index 000000000..bfd5ea436 --- /dev/null +++ b/api-bucket-replication.go @@ -0,0 +1,149 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/replication" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// RemoveBucketReplication removes a replication config on an existing bucket. +func (c Client) RemoveBucketReplication(ctx context.Context, bucketName string) error { + return c.removeBucketReplication(ctx, bucketName) +} + +// SetBucketReplication sets a replication config on an existing bucket. +func (c Client) SetBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // If replication is empty then delete it. + if cfg.Empty() { + return c.removeBucketReplication(ctx, bucketName) + } + // Save the updated replication. + return c.putBucketReplication(ctx, bucketName, cfg) +} + +// Saves a new bucket replication. +func (c Client) putBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication", "") + replication, err := xml.Marshal(cfg) + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(replication), + contentLength: int64(len(replication)), + contentMD5Base64: sumMD5Base64(replication), + } + + // Execute PUT to upload a new bucket replication config. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + + return nil +} + +// Remove replication from a bucket. 
+func (c Client) removeBucketReplication(ctx context.Context, bucketName string) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication", "") + + // Execute DELETE on objectName. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + return nil +} + +// GetBucketReplication fetches bucket replication configuration.If config is not +// found, returns empty config with nil error. +func (c Client) GetBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return cfg, err + } + bucketReplicationCfg, err := c.getBucketReplication(ctx, bucketName) + if err != nil { + errResponse := ToErrorResponse(err) + if errResponse.Code == "ReplicationConfigurationNotFoundError" { + return cfg, nil + } + return cfg, err + } + return bucketReplicationCfg, nil +} + +// Request server for current bucket replication config. +func (c Client) getBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication", "") + + // Execute GET on bucket to get replication config. 
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return cfg, err + } + + if resp.StatusCode != http.StatusOK { + return cfg, httpRespToErrorResponse(resp, bucketName, "") + } + + if err = xmlDecoder(resp.Body, &cfg); err != nil { + return cfg, err + } + + return cfg, nil +} diff --git a/api-bucket-tagging.go b/api-bucket-tagging.go new file mode 100644 index 000000000..fcb966e63 --- /dev/null +++ b/api-bucket-tagging.go @@ -0,0 +1,135 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "errors" + "io" + "io/ioutil" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/tags" +) + +// GetBucketTagging fetch tagging configuration for a bucket with a +// context to control cancellations and timeouts. +func (c Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + // Execute GET on bucket to get tagging configuration. 
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, "") + } + + defer io.Copy(ioutil.Discard, resp.Body) + return tags.ParseBucketXML(resp.Body) +} + +// SetBucketTagging sets tagging configuration for a bucket +// with a context to control cancellations and timeouts. +func (c Client) SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + if tags == nil { + return errors.New("nil tags passed") + } + + buf, err := xml.Marshal(tags) + if err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + // Content-length is mandatory to set a default encryption configuration + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(buf), + contentLength: int64(len(buf)), + contentMD5Base64: sumMD5Base64(buf), + } + + // Execute PUT on bucket to put tagging configuration. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} + +// RemoveBucketTagging removes tagging configuration for a +// bucket with a context to control cancellations and timeouts. +func (c Client) RemoveBucketTagging(ctx context.Context, bucketName string) error { + // Input validation. 
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + // Execute DELETE on bucket to remove tagging configuration. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} diff --git a/api-bucket-versioning.go b/api-bucket-versioning.go new file mode 100644 index 000000000..0889d43b0 --- /dev/null +++ b/api-bucket-versioning.go @@ -0,0 +1,120 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketVersioning sets a bucket versioning configuration +func (c Client) SetBucketVersioning(ctx context.Context, bucketName string, config BucketVersioningConfiguration) error { + // Input validation. 
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + buf, err := xml.Marshal(config) + if err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("versioning", "") + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(buf), + contentLength: int64(len(buf)), + contentMD5Base64: sumMD5Base64(buf), + contentSHA256Hex: sum256Hex(buf), + } + + // Execute PUT to set a bucket versioning. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// EnableVersioning - enable object versioning in given bucket. +func (c Client) EnableVersioning(ctx context.Context, bucketName string) error { + return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Enabled"}) +} + +// SuspendVersioning - suspend object versioning in given bucket. +func (c Client) SuspendVersioning(ctx context.Context, bucketName string) error { + return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Suspended"}) +} + +// BucketVersioningConfiguration is the versioning configuration structure +type BucketVersioningConfiguration struct { + XMLName xml.Name `xml:"VersioningConfiguration"` + Status string `xml:"Status"` + MFADelete string `xml:"MfaDelete,omitempty"` +} + +// GetBucketVersioning gets the versioning configuration on +// an existing bucket with a context to control cancellations and timeouts. +func (c Client) GetBucketVersioning(ctx context.Context, bucketName string) (BucketVersioningConfiguration, error) { + // Input validation. 
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return BucketVersioningConfiguration{}, err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("versioning", "") + + // Execute GET on bucket to get the versioning configuration. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return BucketVersioningConfiguration{}, err + } + + if resp.StatusCode != http.StatusOK { + return BucketVersioningConfiguration{}, httpRespToErrorResponse(resp, bucketName, "") + } + + versioningConfig := BucketVersioningConfiguration{} + if err = xmlDecoder(resp.Body, &versioningConfig); err != nil { + return versioningConfig, err + } + + return versioningConfig, nil +} diff --git a/api-compose-object.go b/api-compose-object.go index 3ac36c502..e10737558 100644 --- a/api-compose-object.go +++ b/api-compose-object.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017, 2018 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017, 2018 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -28,192 +28,175 @@ import ( "strings" "time" - "github.com/minio/minio-go/pkg/encrypt" - "github.com/minio/minio-go/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/s3utils" ) -// DestinationInfo - type with information about the object to be -// created via server-side copy requests, using the Compose API. 
-type DestinationInfo struct { - bucket, object string - encryption encrypt.ServerSide +// CopyDestOptions represents options specified by user for CopyObject/ComposeObject APIs +type CopyDestOptions struct { + Bucket string // points to destination bucket + Object string // points to destination object + // `Encryption` is the key info for server-side-encryption with customer + // provided key. If it is nil, no encryption is performed. + Encryption encrypt.ServerSide + + // `userMeta` is the user-metadata key-value pairs to be set on the + // destination. The keys are automatically prefixed with `x-amz-meta-` + // if needed. If nil is passed, and if only a single source (of any + // size) is provided in the ComposeObject call, then metadata from the + // source is copied to the destination. // if no user-metadata is provided, it is copied from source // (when there is only once source object in the compose // request) - userMetadata map[string]string + UserMetadata map[string]string + // UserMetadata is only set to destination if ReplaceMetadata is true + // other value is UserMetadata is ignored and we preserve src.UserMetadata + // NOTE: if you set this value to true and now metadata is present + // in UserMetadata your destination object will not have any metadata + // set. + ReplaceMetadata bool + + // `userTags` is the user defined object tags to be set on destination. + // This will be set only if the `replaceTags` field is set to true. + // Otherwise this field is ignored + UserTags map[string]string + ReplaceTags bool + + // Specifies whether you want to apply a Legal Hold to the copied object. + LegalHold LegalHoldStatus + + // Object Retention related fields + Mode RetentionMode + RetainUntilDate time.Time + + Size int64 // Needs to be specified if progress bar is specified. + // Progress of the entire copy operation will be sent here. + Progress io.Reader } -// NewDestinationInfo - creates a compose-object/copy-source -// destination info object. 
-// -// `encSSEC` is the key info for server-side-encryption with customer -// provided key. If it is nil, no encryption is performed. -// -// `userMeta` is the user-metadata key-value pairs to be set on the -// destination. The keys are automatically prefixed with `x-amz-meta-` -// if needed. If nil is passed, and if only a single source (of any -// size) is provided in the ComposeObject call, then metadata from the -// source is copied to the destination. -func NewDestinationInfo(bucket, object string, sse encrypt.ServerSide, userMeta map[string]string) (d DestinationInfo, err error) { - // Input validation. - if err = s3utils.CheckValidBucketName(bucket); err != nil { - return d, err - } - if err = s3utils.CheckValidObjectName(object); err != nil { - return d, err - } - - // Process custom-metadata to remove a `x-amz-meta-` prefix if - // present and validate that keys are distinct (after this - // prefix removal). +// Process custom-metadata to remove a `x-amz-meta-` prefix if +// present and validate that keys are distinct (after this +// prefix removal). +func filterCustomMeta(userMeta map[string]string) map[string]string { m := make(map[string]string) for k, v := range userMeta { if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { k = k[len("x-amz-meta-"):] } if _, ok := m[k]; ok { - return d, ErrInvalidArgument(fmt.Sprintf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k)) + continue } m[k] = v } - - return DestinationInfo{ - bucket: bucket, - object: object, - encryption: sse, - userMetadata: m, - }, nil + return m } -// getUserMetaHeadersMap - construct appropriate key-value pairs to send -// as headers from metadata map to pass into copy-object request. For -// single part copy-object (i.e. non-multipart object), enable the -// withCopyDirectiveHeader to set the `x-amz-metadata-directive` to -// `REPLACE`, so that metadata headers from the source are not copied -// over. 
-func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) map[string]string { - if len(d.userMetadata) == 0 { - return nil - } - r := make(map[string]string) - if withCopyDirectiveHeader { - r["x-amz-metadata-directive"] = "REPLACE" - } - for k, v := range d.userMetadata { - if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) { - r[k] = v - } else { - r["x-amz-meta-"+k] = v +// Marshal converts all the CopyDestOptions into their +// equivalent HTTP header representation +func (opts CopyDestOptions) Marshal(header http.Header) { + const replaceDirective = "REPLACE" + if opts.ReplaceTags { + header.Set(amzTaggingHeaderDirective, replaceDirective) + if tags := s3utils.TagEncode(opts.UserTags); tags != "" { + header.Set(amzTaggingHeader, tags) } } - return r -} -// SourceInfo - represents a source object to be copied, using -// server-side copying APIs. -type SourceInfo struct { - bucket, object string - start, end int64 - encryption encrypt.ServerSide - // Headers to send with the upload-part-copy request involving - // this source object. - Headers http.Header -} + if opts.LegalHold != LegalHoldStatus("") { + header.Set(amzLegalHoldHeader, opts.LegalHold.String()) + } -// NewSourceInfo - create a compose-object/copy-object source info -// object. -// -// `decryptSSEC` is the decryption key using server-side-encryption -// with customer provided key. It may be nil if the source is not -// encrypted. 
-func NewSourceInfo(bucket, object string, sse encrypt.ServerSide) SourceInfo { - r := SourceInfo{ - bucket: bucket, - object: object, - start: -1, // range is unspecified by default - encryption: sse, - Headers: make(http.Header), + if opts.Mode != RetentionMode("") && !opts.RetainUntilDate.IsZero() { + header.Set(amzLockMode, opts.Mode.String()) + header.Set(amzLockRetainUntil, opts.RetainUntilDate.Format(time.RFC3339)) } - // Set the source header - r.Headers.Set("x-amz-copy-source", s3utils.EncodePath(bucket+"/"+object)) - return r -} + if opts.Encryption != nil { + opts.Encryption.Marshal(header) + } -// SetRange - Set the start and end offset of the source object to be -// copied. If this method is not called, the whole source object is -// copied. -func (s *SourceInfo) SetRange(start, end int64) error { - if start > end || start < 0 { - return ErrInvalidArgument("start must be non-negative, and start must be at most end.") + if opts.ReplaceMetadata { + header.Set("x-amz-metadata-directive", replaceDirective) + for k, v := range filterCustomMeta(opts.UserMetadata) { + if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) { + header.Set(k, v) + } else { + header.Set("x-amz-meta-"+k, v) + } + } } - // Note that 0 <= start <= end - s.start, s.end = start, end - return nil } -// SetMatchETagCond - Set ETag match condition. The object is copied -// only if the etag of the source matches the value given here. -func (s *SourceInfo) SetMatchETagCond(etag string) error { - if etag == "" { - return ErrInvalidArgument("ETag cannot be empty.") +// toDestinationInfo returns a validated copyOptions object. +func (opts CopyDestOptions) validate() (err error) { + // Input validation. 
+ if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil { + return err + } + if err = s3utils.CheckValidObjectName(opts.Object); err != nil { + return err + } + if opts.Progress != nil && opts.Size < 0 { + return errInvalidArgument("For progress bar effective size needs to be specified") } - s.Headers.Set("x-amz-copy-source-if-match", etag) return nil } -// SetMatchETagExceptCond - Set the ETag match exception -// condition. The object is copied only if the etag of the source is -// not the value given here. -func (s *SourceInfo) SetMatchETagExceptCond(etag string) error { - if etag == "" { - return ErrInvalidArgument("ETag cannot be empty.") - } - s.Headers.Set("x-amz-copy-source-if-none-match", etag) - return nil +// CopySrcOptions represents a source object to be copied, using +// server-side copying APIs. +type CopySrcOptions struct { + Bucket, Object string + VersionID string + MatchETag string + NoMatchETag string + MatchModifiedSince time.Time + MatchUnmodifiedSince time.Time + MatchRange bool + Start, End int64 + Encryption encrypt.ServerSide } -// SetModifiedSinceCond - Set the modified since condition. -func (s *SourceInfo) SetModifiedSinceCond(modTime time.Time) error { - if modTime.IsZero() { - return ErrInvalidArgument("Input time cannot be 0.") +// Marshal converts all the CopySrcOptions into their +// equivalent HTTP header representation +func (opts CopySrcOptions) Marshal(header http.Header) { + // Set the source header + header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)) + if opts.VersionID != "" { + header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)+"?versionId="+opts.VersionID) } - s.Headers.Set("x-amz-copy-source-if-modified-since", modTime.Format(http.TimeFormat)) - return nil -} -// SetUnmodifiedSinceCond - Set the unmodified since condition. 
-func (s *SourceInfo) SetUnmodifiedSinceCond(modTime time.Time) error { - if modTime.IsZero() { - return ErrInvalidArgument("Input time cannot be 0.") + if opts.MatchETag != "" { + header.Set("x-amz-copy-source-if-match", opts.MatchETag) + } + if opts.NoMatchETag != "" { + header.Set("x-amz-copy-source-if-none-match", opts.NoMatchETag) + } + + if !opts.MatchModifiedSince.IsZero() { + header.Set("x-amz-copy-source-if-modified-since", opts.MatchModifiedSince.Format(http.TimeFormat)) + } + if !opts.MatchUnmodifiedSince.IsZero() { + header.Set("x-amz-copy-source-if-unmodified-since", opts.MatchUnmodifiedSince.Format(http.TimeFormat)) + } + + if opts.Encryption != nil { + encrypt.SSECopy(opts.Encryption).Marshal(header) } - s.Headers.Set("x-amz-copy-source-if-unmodified-since", modTime.Format(http.TimeFormat)) - return nil } -// Helper to fetch size and etag of an object using a StatObject call. -func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[string]string, err error) { - // Get object info - need size and etag here. Also, decryption - // headers are added to the stat request if given. - var objInfo ObjectInfo - opts := StatObjectOptions{GetObjectOptions{ServerSideEncryption: encrypt.SSE(s.encryption)}} - objInfo, err = c.statObject(context.Background(), s.bucket, s.object, opts) - if err != nil { - err = ErrInvalidArgument(fmt.Sprintf("Could not stat object - %s/%s: %v", s.bucket, s.object, err)) - } else { - size = objInfo.Size - etag = objInfo.ETag - userMeta = make(map[string]string) - for k, v := range objInfo.Metadata { - if strings.HasPrefix(k, "x-amz-meta-") { - if len(v) > 0 { - userMeta[k] = v[0] - } - } - } +func (opts CopySrcOptions) validate() (err error) { + // Input validation. 
+ if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil { + return err } - return + if err = s3utils.CheckValidObjectName(opts.Object); err != nil { + return err + } + if opts.Start > opts.End || opts.Start < 0 { + return errInvalidArgument("start must be non-negative, and start must be at most end.") + } + return nil } // Low level implementation of CopyObject API, supports only upto 5GiB worth of copy. @@ -232,7 +215,7 @@ func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuck headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) // Send upload-part-copy request - resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ + resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ bucketName: destBucket, objectName: destObject, customHeader: headers, @@ -270,7 +253,7 @@ func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, dest headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) if startOffset < 0 { - return p, ErrInvalidArgument("startOffset must be non-negative") + return p, errInvalidArgument("startOffset must be non-negative") } if length >= 0 { @@ -285,7 +268,7 @@ func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, dest queryValues.Set("partNumber", strconv.Itoa(partID)) queryValues.Set("uploadId", uploadID) - resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ + resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ bucketName: destBucket, objectName: destObject, customHeader: headers, @@ -323,7 +306,7 @@ func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID str urlValues.Set("uploadId", uploadID) // Send upload-part-copy request - resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ + resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ bucketName: bucket, objectName: object, customHeader: headers, @@ -349,68 +332,72 @@ func (c Client) uploadPartCopy(ctx 
context.Context, bucket, object, uploadID str return p, nil } -// ComposeObjectWithProgress - creates an object using server-side copying of -// existing objects. It takes a list of source objects (with optional -// offsets) and concatenates them into a new object using only -// server-side copying operations. Optionally takes progress reader hook -// for applications to look at current progress. -func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo, progress io.Reader) error { +// ComposeObject - creates an object using server-side copying +// of existing objects. It takes a list of source objects (with optional offsets) +// and concatenates them into a new object using only server-side copying +// operations. Optionally takes progress reader hook for applications to +// look at current progress. +func (c Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) { if len(srcs) < 1 || len(srcs) > maxPartsCount { - return ErrInvalidArgument("There must be as least one and up to 10000 source objects.") + return UploadInfo{}, errInvalidArgument("There must be as least one and up to 10000 source objects.") + } + + for _, src := range srcs { + if err := src.validate(); err != nil { + return UploadInfo{}, err + } + } + + if err := dst.validate(); err != nil { + return UploadInfo{}, err } - ctx := context.Background() - srcSizes := make([]int64, len(srcs)) - var totalSize, size, totalParts int64 - var srcUserMeta map[string]string - etags := make([]string, len(srcs)) + + srcObjectInfos := make([]ObjectInfo, len(srcs)) + srcObjectSizes := make([]int64, len(srcs)) + var totalSize, totalParts int64 var err error for i, src := range srcs { - size, etags[i], srcUserMeta, err = src.getProps(c) + opts := StatObjectOptions{ServerSideEncryption: encrypt.SSE(src.Encryption), VersionID: src.VersionID} + srcObjectInfos[i], err = c.statObject(context.Background(), src.Bucket, src.Object, opts) if err != nil 
{ - return err - } - - // Error out if client side encryption is used in this source object when - // more than one source objects are given. - if len(srcs) > 1 && src.Headers.Get("x-amz-meta-x-amz-key") != "" { - return ErrInvalidArgument( - fmt.Sprintf("Client side encryption is used in source object %s/%s", src.bucket, src.object)) + return UploadInfo{}, err } + srcCopySize := srcObjectInfos[i].Size // Check if a segment is specified, and if so, is the // segment within object bounds? - if src.start != -1 { + if src.MatchRange { // Since range is specified, // 0 <= src.start <= src.end // so only invalid case to check is: - if src.end >= size { - return ErrInvalidArgument( - fmt.Sprintf("SourceInfo %d has invalid segment-to-copy [%d, %d] (size is %d)", - i, src.start, src.end, size)) + if src.End >= srcCopySize || src.Start < 0 { + return UploadInfo{}, errInvalidArgument( + fmt.Sprintf("CopySrcOptions %d has invalid segment-to-copy [%d, %d] (size is %d)", + i, src.Start, src.End, srcCopySize)) } - size = src.end - src.start + 1 + srcCopySize = src.End - src.Start + 1 } // Only the last source may be less than `absMinPartSize` - if size < absMinPartSize && i < len(srcs)-1 { - return ErrInvalidArgument( - fmt.Sprintf("SourceInfo %d is too small (%d) and it is not the last part", i, size)) + if srcCopySize < absMinPartSize && i < len(srcs)-1 { + return UploadInfo{}, errInvalidArgument( + fmt.Sprintf("CopySrcOptions %d is too small (%d) and it is not the last part", i, srcCopySize)) } // Is data to copy too large? 
- totalSize += size + totalSize += srcCopySize if totalSize > maxMultipartPutObjectSize { - return ErrInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize)) + return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize)) } // record source size - srcSizes[i] = size + srcObjectSizes[i] = srcCopySize // calculate parts needed for current source - totalParts += partsRequired(size) + totalParts += partsRequired(srcCopySize) // Do we need more parts than we are allowed? if totalParts > maxPartsCount { - return ErrInvalidArgument(fmt.Sprintf( + return UploadInfo{}, errInvalidArgument(fmt.Sprintf( "Your proposed compose object requires more than %d parts", maxPartsCount)) } } @@ -418,8 +405,8 @@ func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo // Single source object case (i.e. when only one source is // involved, it is being copied wholly and at most 5GiB in // size, emptyfiles are also supported). - if (totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize) || (totalSize == 0) { - return c.CopyObjectWithProgress(dst, srcs[0], progress) + if (totalParts == 1 && srcs[0].Start == -1 && totalSize <= maxPartSize) || (totalSize == 0) { + return c.CopyObject(ctx, dst, srcs[0]) } // Now, handle multipart-copy cases. @@ -427,9 +414,7 @@ func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo // 1. Ensure that the object has not been changed while // we are copying data. for i, src := range srcs { - if src.Headers.Get("x-amz-copy-source-if-match") == "" { - src.SetMatchETagCond(etags[i]) - } + src.MatchETag = srcObjectInfos[i].ETag } // 2. Initiate a new multipart upload. @@ -437,37 +422,45 @@ func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo // Set user-metadata on the destination object. 
If no // user-metadata is specified, and there is only one source, // (only) then metadata from source is copied. - userMeta := dst.getUserMetaHeadersMap(false) - metaMap := userMeta - if len(userMeta) == 0 && len(srcs) == 1 { - metaMap = srcUserMeta + var userMeta map[string]string + if dst.ReplaceMetadata { + userMeta = dst.UserMetadata + } else { + userMeta = srcObjectInfos[0].UserMetadata } - metaHeaders := make(map[string]string) - for k, v := range metaMap { - metaHeaders[k] = v + + var userTags map[string]string + if dst.ReplaceTags { + userTags = dst.UserTags + } else { + userTags = srcObjectInfos[0].UserTags } - uploadID, err := c.newUploadID(ctx, dst.bucket, dst.object, PutObjectOptions{ServerSideEncryption: dst.encryption, UserMetadata: metaHeaders}) + uploadID, err := c.newUploadID(ctx, dst.Bucket, dst.Object, PutObjectOptions{ + ServerSideEncryption: dst.Encryption, + UserMetadata: userMeta, + UserTags: userTags, + Mode: dst.Mode, + RetainUntilDate: dst.RetainUntilDate, + LegalHold: dst.LegalHold, + }) if err != nil { - return err + return UploadInfo{}, err } // 3. Perform copy part uploads objParts := []CompletePart{} partIndex := 1 for i, src := range srcs { - h := src.Headers - if src.encryption != nil { - encrypt.SSECopy(src.encryption).Marshal(h) - } - // Add destination encryption headers - if dst.encryption != nil { - dst.encryption.Marshal(h) + var h = make(http.Header) + src.Marshal(h) + if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC { + dst.Encryption.Marshal(h) } // calculate start/end indices of parts after // splitting. 
- startIdx, endIdx := calculateEvenSplits(srcSizes[i], src) + startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src) for j, start := range startIdx { end := endIdx[j] @@ -477,13 +470,13 @@ func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo fmt.Sprintf("bytes=%d-%d", start, end)) // make upload-part-copy request - complPart, err := c.uploadPartCopy(ctx, dst.bucket, - dst.object, uploadID, partIndex, h) + complPart, err := c.uploadPartCopy(ctx, dst.Bucket, + dst.Object, uploadID, partIndex, h) if err != nil { - return err + return UploadInfo{}, err } - if progress != nil { - io.CopyN(ioutil.Discard, progress, end-start+1) + if dst.Progress != nil { + io.CopyN(ioutil.Discard, dst.Progress, end-start+1) } objParts = append(objParts, complPart) partIndex++ @@ -491,20 +484,14 @@ func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo } // 4. Make final complete-multipart request. - _, err = c.completeMultipartUpload(ctx, dst.bucket, dst.object, uploadID, + uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID, completeMultipartUpload{Parts: objParts}) if err != nil { - return err + return UploadInfo{}, err } - return nil -} -// ComposeObject - creates an object using server-side copying of -// existing objects. It takes a list of source objects (with optional -// offsets) and concatenates them into a new object using only -// server-side copying operations. -func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { - return c.ComposeObjectWithProgress(dst, srcs, nil) + uploadInfo.Size = totalSize + return uploadInfo, nil } // partsRequired is maximum parts possible with @@ -522,7 +509,7 @@ func partsRequired(size int64) int64 { // start and end index slices. Splits happen evenly to be sure that no // part is less than 5MiB, as that could fail the multipart request if // it is not the last part. 
-func calculateEvenSplits(size int64, src SourceInfo) (startIndex, endIndex []int64) { +func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex []int64) { if size == 0 { return } @@ -543,7 +530,7 @@ func calculateEvenSplits(size int64, src SourceInfo) (startIndex, endIndex []int // size = q * k + r (by simple division of size by k, // so that 0 <= r < k) // - start := src.start + start := src.Start if start == -1 { start = 0 } diff --git a/api-compose-object_test.go b/api-compose-object_test.go index 295bbc263..fbf8f66a7 100644 --- a/api-compose-object_test.go +++ b/api-compose-object_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,6 +17,7 @@ package minio import ( + "net/http" "reflect" "strings" "testing" @@ -58,17 +59,17 @@ func TestCalculateEvenSplits(t *testing.T) { testCases := []struct { // input size and source object size int64 - src SourceInfo + src CopySrcOptions // output part-indexes starts, ends []int64 }{ - {0, SourceInfo{start: -1}, nil, nil}, - {1, SourceInfo{start: -1}, []int64{0}, []int64{0}}, - {1, SourceInfo{start: 0}, []int64{0}, []int64{0}}, + {0, CopySrcOptions{Start: -1}, nil, nil}, + {1, CopySrcOptions{Start: -1}, []int64{0}, []int64{0}}, + {1, CopySrcOptions{Start: 0}, []int64{0}, []int64{0}}, - {gb1, SourceInfo{start: -1}, []int64{0, 536870912}, []int64{536870911, 1073741823}}, - {gb5, SourceInfo{start: -1}, + {gb1, CopySrcOptions{Start: -1}, []int64{0, 536870912}, []int64{536870911, 1073741823}}, + {gb5, CopySrcOptions{Start: -1}, []int64{0, 536870912, 1073741824, 1610612736, 2147483648, 2684354560, 3221225472, 3758096384, 4294967296, 4831838208}, []int64{536870911, 1073741823, 1610612735, 2147483647, 2684354559, 
3221225471, @@ -76,13 +77,13 @@ func TestCalculateEvenSplits(t *testing.T) { }, // 2 part splits - {gb5p1, SourceInfo{start: -1}, + {gb5p1, CopySrcOptions{Start: -1}, []int64{0, 536870913, 1073741825, 1610612737, 2147483649, 2684354561, 3221225473, 3758096385, 4294967297, 4831838209}, []int64{536870912, 1073741824, 1610612736, 2147483648, 2684354560, 3221225472, 3758096384, 4294967296, 4831838208, 5368709120}, }, - {gb5p1, SourceInfo{start: -1}, + {gb5p1, CopySrcOptions{Start: -1}, []int64{0, 536870913, 1073741825, 1610612737, 2147483649, 2684354561, 3221225473, 3758096385, 4294967297, 4831838209}, []int64{536870912, 1073741824, 1610612736, 2147483648, 2684354560, 3221225472, @@ -90,7 +91,7 @@ func TestCalculateEvenSplits(t *testing.T) { }, // 3 part splits - {gb10p1, SourceInfo{start: -1}, + {gb10p1, CopySrcOptions{Start: -1}, []int64{0, 536870913, 1073741825, 1610612737, 2147483649, 2684354561, 3221225473, 3758096385, 4294967297, 4831838209, 5368709121, 5905580033, 6442450945, 6979321857, 7516192769, 8053063681, @@ -100,7 +101,7 @@ func TestCalculateEvenSplits(t *testing.T) { 5905580032, 6442450944, 6979321856, 7516192768, 8053063680, 8589934592, 9126805504, 9663676416, 10200547328, 10737418240}, }, - {gb10p2, SourceInfo{start: -1}, + {gb10p2, CopySrcOptions{Start: -1}, []int64{0, 536870913, 1073741826, 1610612738, 2147483650, 2684354562, 3221225474, 3758096386, 4294967298, 4831838210, 5368709122, 5905580034, 6442450946, 6979321858, 7516192770, 8053063682, @@ -120,8 +121,7 @@ func TestCalculateEvenSplits(t *testing.T) { } } -func TestGetUserMetaHeadersMap(t *testing.T) { - +func TestDestOptions(t *testing.T) { userMetadata := map[string]string{ "test": "test", "x-amz-acl": "public-read-write", @@ -130,26 +130,27 @@ func TestGetUserMetaHeadersMap(t *testing.T) { "x-amz-grant-write": "test@exo.ch", } - destInfo := &DestinationInfo{"bucket", "object", nil, userMetadata} - - r := destInfo.getUserMetaHeadersMap(true) + r := make(http.Header) - i := 0 + dst := 
CopyDestOptions{ + Bucket: "bucket", + Object: "object", + ReplaceMetadata: true, + UserMetadata: userMetadata, + } + dst.Marshal(r) - if _, ok := r["x-amz-metadata-directive"]; !ok { - t.Errorf("Test %d - metadata directive was expected but is missing", i) - i++ + if v := r.Get("x-amz-metadata-directive"); v != "REPLACE" { + t.Errorf("Test - metadata directive was expected but is missing") } for k := range r { if strings.HasSuffix(k, "test") && !strings.HasPrefix(k, "x-amz-meta-") { - t.Errorf("Test %d - meta %q was expected as an x amz meta", i, k) - i++ + t.Errorf("Test meta %q was expected as an x amz meta", k) } if !strings.HasSuffix(k, "test") && strings.HasPrefix(k, "x-amz-meta-") { - t.Errorf("Test %d - an amz/standard/storageClass Header was expected but got an x amz meta data", i) - i++ + t.Errorf("Test an amz/standard/storageClass Header was expected but got an x amz meta data") } } } diff --git a/api-datatypes.go b/api-datatypes.go index 63fc08905..970e1fa5e 100644 --- a/api-datatypes.go +++ b/api-datatypes.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,6 +18,8 @@ package minio import ( + "encoding/xml" + "io" "net/http" "time" ) @@ -30,6 +32,59 @@ type BucketInfo struct { CreationDate time.Time `json:"creationDate"` } +// StringMap represents map with custom UnmarshalXML +type StringMap map[string]string + +// UnmarshalXML unmarshals the XML into a map of string to strings, +// creating a key in the map for each tag and setting it's value to the +// tags contents. +// +// The fact this function is on the pointer of Map is important, so that +// if m is nil it can be initialized, which is often the case if m is +// nested in another xml structural. 
This is also why the first thing done +// on the first line is initialize it. +func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + *m = StringMap{} + type xmlMapEntry struct { + XMLName xml.Name + Value string `xml:",chardata"` + } + for { + var e xmlMapEntry + err := d.Decode(&e) + if err == io.EOF { + break + } else if err != nil { + return err + } + (*m)[e.XMLName.Local] = e.Value + } + return nil +} + +// Owner name. +type Owner struct { + DisplayName string `json:"name"` + ID string `json:"id"` +} + +// UploadInfo contains information about the +// newly uploaded or copied object. +type UploadInfo struct { + Bucket string + Key string + ETag string + Size int64 + LastModified time.Time + Location string + VersionID string + + // Lifecycle expiry-date and ruleID associated with the expiry + // not to be confused with `Expires` HTTP header. + Expiration time.Time + ExpirationRuleID string +} + // ObjectInfo container for object metadata. type ObjectInfo struct { // An ETag is optionally set to md5sum of an object. In case of multipart objects, @@ -41,20 +96,54 @@ type ObjectInfo struct { LastModified time.Time `json:"lastModified"` // Date and time the object was last modified. Size int64 `json:"size"` // Size in bytes of the object. ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data. + Expires time.Time `json:"expires"` // The date and time at which the object is no longer able to be cached. // Collection of additional metadata on the object. // eg: x-amz-meta-*, content-encoding etc. Metadata http.Header `json:"metadata" xml:"-"` + // x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value. + UserMetadata StringMap `json:"userMetadata"` + + // x-amz-tagging values in their k/v values. + UserTags map[string]string `json:"userTags"` + + // x-amz-tagging-count value + UserTagCount int + // Owner name. 
- Owner struct { - DisplayName string `json:"name"` - ID string `json:"id"` - } `json:"owner"` + Owner Owner + + // ACL grant. + Grant []struct { + Grantee struct { + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName"` + URI string `xml:"URI"` + } `xml:"Grantee"` + Permission string `xml:"Permission"` + } `xml:"Grant"` // The class of storage used to store the object. StorageClass string `json:"storageClass"` + // Versioning related information + IsLatest bool + IsDeleteMarker bool + VersionID string `xml:"VersionId"` + + // x-amz-replication-status value is either in one of the following states + // - COMPLETE + // - PENDING + // - FAILED + // - REPLICA (on the destination) + ReplicationStatus string `xml:"ReplicationStatus"` + + // Lifecycle expiry-date and ruleID associated with the expiry + // not to be confused with `Expires` HTTP header. + Expiration time.Time + ExpirationRuleID string + // Error Err error `json:"-"` } diff --git a/api-error-response.go b/api-error-response.go index 38b85c426..ecaea2aec 100644 --- a/api-error-response.go +++ b/api-error-response.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -51,6 +51,9 @@ type ErrorResponse struct { // only in HEAD bucket and ListObjects response. Region string + // Captures the server string returned in response header. + Server string + // Underlying HTTP status code for the returned error StatusCode int `xml:"-" json:"-"` } @@ -60,7 +63,7 @@ type ErrorResponse struct { // // For example: // -// import s3 "github.com/minio/minio-go" +// import s3 "github.com/minio/minio-go/v7" // ... // ... // reader, stat, err := s3.GetObject(...) 
@@ -100,16 +103,17 @@ const ( func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error { if resp == nil { msg := "Response is empty. " + reportIssue - return ErrInvalidArgument(msg) + return errInvalidArgument(msg) } errResp := ErrorResponse{ StatusCode: resp.StatusCode, + Server: resp.Header.Get("Server"), } err := xmlDecoder(resp.Body, &errResp) // Xml decoding failed with no body, fall back to HTTP headers. - if err != nil || errResp.Message == "" { + if err != nil || errResp.Message == "" { switch resp.StatusCode { case http.StatusNotFound: if objectName == "" { @@ -179,8 +183,8 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) return errResp } -// ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration. -func ErrTransferAccelerationBucket(bucketName string) error { +// errTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration. +func errTransferAccelerationBucket(bucketName string) error { return ErrorResponse{ StatusCode: http.StatusBadRequest, Code: "InvalidArgument", @@ -189,8 +193,8 @@ func ErrTransferAccelerationBucket(bucketName string) error { } } -// ErrEntityTooLarge - Input size is larger than supported maximum. -func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error { +// errEntityTooLarge - Input size is larger than supported maximum. +func errEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error { msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize) return ErrorResponse{ StatusCode: http.StatusBadRequest, @@ -201,8 +205,8 @@ func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName st } } -// ErrEntityTooSmall - Input size is smaller than supported minimum. 
-func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error { +// errEntityTooSmall - Input size is smaller than supported minimum. +func errEntityTooSmall(totalSize int64, bucketName, objectName string) error { msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize) return ErrorResponse{ StatusCode: http.StatusBadRequest, @@ -213,8 +217,8 @@ func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error { } } -// ErrUnexpectedEOF - Unexpected end of file reached. -func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error { +// errUnexpectedEOF - Unexpected end of file reached. +func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error { msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize) return ErrorResponse{ StatusCode: http.StatusBadRequest, @@ -225,8 +229,8 @@ func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) } } -// ErrInvalidBucketName - Invalid bucket name response. -func ErrInvalidBucketName(message string) error { +// errInvalidBucketName - Invalid bucket name response. +func errInvalidBucketName(message string) error { return ErrorResponse{ StatusCode: http.StatusBadRequest, Code: "InvalidBucketName", @@ -235,8 +239,8 @@ func ErrInvalidBucketName(message string) error { } } -// ErrInvalidObjectName - Invalid object name response. -func ErrInvalidObjectName(message string) error { +// errInvalidObjectName - Invalid object name response. +func errInvalidObjectName(message string) error { return ErrorResponse{ StatusCode: http.StatusNotFound, Code: "NoSuchKey", @@ -245,12 +249,8 @@ func ErrInvalidObjectName(message string) error { } } -// ErrInvalidObjectPrefix - Invalid object prefix response is -// similar to object name response. 
-var ErrInvalidObjectPrefix = ErrInvalidObjectName - -// ErrInvalidArgument - Invalid argument response. -func ErrInvalidArgument(message string) error { +// errInvalidArgument - Invalid argument response. +func errInvalidArgument(message string) error { return ErrorResponse{ StatusCode: http.StatusBadRequest, Code: "InvalidArgument", @@ -259,20 +259,9 @@ func ErrInvalidArgument(message string) error { } } -// ErrNoSuchBucketPolicy - No Such Bucket Policy response -// The specified bucket does not have a bucket policy. -func ErrNoSuchBucketPolicy(message string) error { - return ErrorResponse{ - StatusCode: http.StatusNotFound, - Code: "NoSuchBucketPolicy", - Message: message, - RequestID: "minio", - } -} - -// ErrAPINotSupported - API not supported response +// errAPINotSupported - API not supported response // The specified API call is not supported -func ErrAPINotSupported(message string) error { +func errAPINotSupported(message string) error { return ErrorResponse{ StatusCode: http.StatusNotImplemented, Code: "APINotSupported", diff --git a/api-error-response_test.go b/api-error-response_test.go index 353103bdd..d7aa8c5ab 100644 --- a/api-error-response_test.go +++ b/api-error-response_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -193,7 +193,7 @@ func TestErrEntityTooLarge(t *testing.T) { BucketName: "minio-bucket", Key: "Asia/", } - actualResult := ErrEntityTooLarge(1000000, 99999, "minio-bucket", "Asia/") + actualResult := errEntityTooLarge(1000000, 99999, "minio-bucket", "Asia/") if !reflect.DeepEqual(expectedResult, actualResult) { t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult) } @@ -209,7 +209,7 @@ func TestErrEntityTooSmall(t *testing.T) { BucketName: "minio-bucket", Key: "Asia/", } - actualResult := ErrEntityTooSmall(-1, "minio-bucket", "Asia/") + actualResult := errEntityTooSmall(-1, "minio-bucket", "Asia/") if !reflect.DeepEqual(expectedResult, actualResult) { t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult) } @@ -226,7 +226,7 @@ func TestErrUnexpectedEOF(t *testing.T) { BucketName: "minio-bucket", Key: "Asia/", } - actualResult := ErrUnexpectedEOF(100, 101, "minio-bucket", "Asia/") + actualResult := errUnexpectedEOF(100, 101, "minio-bucket", "Asia/") if !reflect.DeepEqual(expectedResult, actualResult) { t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult) } @@ -240,7 +240,7 @@ func TestErrInvalidBucketName(t *testing.T) { Message: "Invalid Bucket name", RequestID: "minio", } - actualResult := ErrInvalidBucketName("Invalid Bucket name") + actualResult := errInvalidBucketName("Invalid Bucket name") if !reflect.DeepEqual(expectedResult, actualResult) { t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult) } @@ -254,13 +254,13 @@ func TestErrInvalidObjectName(t *testing.T) { Message: "Invalid Object Key", RequestID: "minio", } - actualResult := ErrInvalidObjectName("Invalid Object Key") + actualResult := errInvalidObjectName("Invalid Object Key") if !reflect.DeepEqual(expectedResult, actualResult) { t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult) } } -// Test validates 
'ErrInvalidArgument' response. +// Test validates 'errInvalidArgument' response. func TestErrInvalidArgument(t *testing.T) { expectedResult := ErrorResponse{ StatusCode: http.StatusBadRequest, @@ -268,7 +268,7 @@ func TestErrInvalidArgument(t *testing.T) { Message: "Invalid Argument", RequestID: "minio", } - actualResult := ErrInvalidArgument("Invalid Argument") + actualResult := errInvalidArgument("Invalid Argument") if !reflect.DeepEqual(expectedResult, actualResult) { t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult) } diff --git a/api-get-lifecycle.go b/api-get-lifecycle.go deleted file mode 100644 index 8097bfc02..000000000 --- a/api-get-lifecycle.go +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io/ioutil" - "net/http" - "net/url" - - "github.com/minio/minio-go/pkg/s3utils" -) - -// GetBucketLifecycle - get bucket lifecycle. -func (c Client) GetBucketLifecycle(bucketName string) (string, error) { - // Input validation. 
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - bucketLifecycle, err := c.getBucketLifecycle(bucketName) - if err != nil { - errResponse := ToErrorResponse(err) - if errResponse.Code == "NoSuchLifecycleConfiguration" { - return "", nil - } - return "", err - } - return bucketLifecycle, nil -} - -// Request server for current bucket lifecycle. -func (c Client) getBucketLifecycle(bucketName string) (string, error) { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("lifecycle", "") - - // Execute GET on bucket to get lifecycle. - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - }) - - defer closeResponse(resp) - if err != nil { - return "", err - } - - if resp != nil { - if resp.StatusCode != http.StatusOK { - return "", httpRespToErrorResponse(resp, bucketName, "") - } - } - - bucketLifecycleBuf, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - - lifecycle := string(bucketLifecycleBuf) - return lifecycle, err -} diff --git a/api-get-object-acl.go b/api-get-object-acl.go index af5544da3..afa53079d 100644 --- a/api-get-object-acl.go +++ b/api-get-object-acl.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2018 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -40,10 +40,9 @@ type accessControlPolicy struct { } `xml:"AccessControlList"` } -//GetObjectACL get object ACLs -func (c Client) GetObjectACL(bucketName, objectName string) (*ObjectInfo, error) { - - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ +// GetObjectACL get object ACLs +func (c Client) GetObjectACL(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) { + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ bucketName: bucketName, objectName: objectName, queryValues: url.Values{ @@ -65,11 +64,16 @@ func (c Client) GetObjectACL(bucketName, objectName string) (*ObjectInfo, error) return nil, err } - objInfo, err := c.statObject(context.Background(), bucketName, objectName, StatObjectOptions{}) + objInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions{}) if err != nil { return nil, err } + objInfo.Owner.DisplayName = res.Owner.DisplayName + objInfo.Owner.ID = res.Owner.ID + + objInfo.Grant = append(objInfo.Grant, res.AccessControlList.Grant...) + cannedACL := getCannedACL(res) if cannedACL != "" { objInfo.Metadata.Add("X-Amz-Acl", cannedACL) diff --git a/api-get-object-context.go b/api-get-object-context.go deleted file mode 100644 index f8dfac7d6..000000000 --- a/api-get-object-context.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio - -import "context" - -// GetObjectWithContext - returns an seekable, readable object. -// The options can be used to specify the GET request further. -func (c Client) GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { - return c.getObjectWithContext(ctx, bucketName, objectName, opts) -} diff --git a/api-get-object-file.go b/api-get-object-file.go index a852220a2..bccff4578 100644 --- a/api-get-object-file.go +++ b/api-get-object-file.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -23,22 +23,12 @@ import ( "os" "path/filepath" - "github.com/minio/minio-go/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/s3utils" ) -// FGetObjectWithContext - download contents of an object to a local file. -// The options can be used to specify the GET request further. -func (c Client) FGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { - return c.fGetObjectWithContext(ctx, bucketName, objectName, filePath, opts) -} - // FGetObject - download contents of an object to a local file. -func (c Client) FGetObject(bucketName, objectName, filePath string, opts GetObjectOptions) error { - return c.fGetObjectWithContext(context.Background(), bucketName, objectName, filePath, opts) -} - -// fGetObjectWithContext - fgetObject wrapper function with context -func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { +// The options can be used to specify the GET request further. 
+func (c Client) FGetObject(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -52,7 +42,7 @@ func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectNam if err == nil { // If the destination exists and is a directory. if st.IsDir() { - return ErrInvalidArgument("fileName is a directory.") + return errInvalidArgument("fileName is a directory.") } } @@ -73,7 +63,7 @@ func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectNam } // Gather md5sum. - objectStat, err := c.StatObject(bucketName, objectName, StatObjectOptions{opts}) + objectStat, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts)) if err != nil { return err } @@ -87,6 +77,17 @@ func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectNam return err } + // If we return early with an error, be sure to close and delete + // filePart. If we have an error along the way there is a chance + // that filePart is somehow damaged, and we should discard it. + closeAndRemove := true + defer func() { + if closeAndRemove { + _ = filePart.Close() + _ = os.Remove(filePartPath) + } + }() + // Issue Stat to get the current offset. st, err = filePart.Stat() if err != nil { @@ -100,7 +101,7 @@ func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectNam } // Seek to current position for incoming reader. - objectReader, objectStat, err := c.getObject(ctx, bucketName, objectName, opts) + objectReader, objectStat, _, err := c.getObject(ctx, bucketName, objectName, opts) if err != nil { return err } @@ -111,6 +112,7 @@ func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectNam } // Close the file before rename, this is specifically needed for Windows users. 
+ closeAndRemove = false if err = filePart.Close(); err != nil { return err } diff --git a/api-get-object.go b/api-get-object.go index 0bf556ec6..2df1112a9 100644 --- a/api-get-object.go +++ b/api-get-object.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -23,20 +23,14 @@ import ( "fmt" "io" "net/http" - "strings" + "net/url" "sync" - "time" - "github.com/minio/minio-go/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/s3utils" ) -// GetObject - returns an seekable, readable object. -func (c Client) GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error) { - return c.getObjectWithContext(context.Background(), bucketName, objectName, opts) -} - // GetObject wrapper function that accepts a request context -func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { +func (c Client) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err @@ -45,6 +39,14 @@ func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName return nil, err } + // Detect if snowball is server location we are talking to. 
+ var snowball bool + if location, ok := c.bucketLocCache.Get(bucketName); ok { + if location == "snowball" { + snowball = true + } + } + var httpReader io.ReadCloser var objectInfo ObjectInfo var err error @@ -92,7 +94,7 @@ func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName } else if req.Offset > 0 { opts.SetRange(req.Offset, 0) } - httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts) + httpReader, objectInfo, _, err = c.getObject(ctx, bucketName, objectName, opts) if err != nil { resCh <- getResponse{Error: err} return @@ -100,7 +102,7 @@ func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName etag = objectInfo.ETag // Read at least firstReq.Buffer bytes, if not we have // reached our EOF. - size, err := io.ReadFull(httpReader, req.Buffer) + size, err := readFull(httpReader, req.Buffer) if size > 0 && err == io.ErrUnexpectedEOF { // If an EOF happens after reading some but not // all the bytes ReadFull returns ErrUnexpectedEOF @@ -119,7 +121,7 @@ func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName // Remove range header if already set, for stat Operations to get original file size. delete(opts.headers, "Range") - objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts}) + objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions(opts)) if err != nil { resCh <- getResponse{ Error: err, @@ -136,10 +138,13 @@ func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName } else if req.settingObjectInfo { // Request is just to get objectInfo. // Remove range header if already set, for stat Operations to get original file size. delete(opts.headers, "Range") - if etag != "" { + // Check whether this is snowball + // if yes do not use If-Match feature + // it doesn't work. 
+ if etag != "" && !snowball { opts.SetMatchETag(etag) } - objectInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts}) + objectInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions(opts)) if err != nil { resCh <- getResponse{ Error: err, @@ -159,7 +164,10 @@ func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName // new ones when they haven't been already. // All readAt requests are new requests. if req.DidOffsetChange || !req.beenRead { - if etag != "" { + // Check whether this is snowball + // if yes do not use If-Match feature + // it doesn't work. + if etag != "" && !snowball { opts.SetMatchETag(etag) } if httpReader != nil { @@ -173,7 +181,7 @@ func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName } else if req.Offset > 0 { // Range is set with respect to the offset. opts.SetRange(req.Offset, 0) } - httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts) + httpReader, objectInfo, _, err = c.getObject(ctx, bucketName, objectName, opts) if err != nil { resCh <- getResponse{ Error: err, @@ -184,8 +192,8 @@ func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName // Read at least req.Buffer bytes, if not we have // reached our EOF. - size, err := io.ReadFull(httpReader, req.Buffer) - if err == io.ErrUnexpectedEOF { + size, err := readFull(httpReader, req.Buffer) + if size > 0 && err == io.ErrUnexpectedEOF { // If an EOF happens after reading some but not // all the bytes ReadFull returns ErrUnexpectedEOF err = io.EOF @@ -310,7 +318,7 @@ func (o *Object) setOffset(bytesRead int64) error { // io.EOF upon end of file. func (o *Object) Read(b []byte) (n int, err error) { if o == nil { - return 0, ErrInvalidArgument("Object is nil") + return 0, errInvalidArgument("Object is nil") } // Locking. 
@@ -321,6 +329,7 @@ func (o *Object) Read(b []byte) (n int, err error) { if o.prevErr != nil || o.isClosed { return 0, o.prevErr } + // Create a new request. readReq := getRequest{ isReadOp: true, @@ -363,7 +372,7 @@ func (o *Object) Read(b []byte) (n int, err error) { // Stat returns the ObjectInfo structure describing Object. func (o *Object) Stat() (ObjectInfo, error) { if o == nil { - return ObjectInfo{}, ErrInvalidArgument("Object is nil") + return ObjectInfo{}, errInvalidArgument("Object is nil") } // Locking. o.mutex.Lock() @@ -395,7 +404,7 @@ func (o *Object) Stat() (ObjectInfo, error) { // file, that error is io.EOF. func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { if o == nil { - return 0, ErrInvalidArgument("Object is nil") + return 0, errInvalidArgument("Object is nil") } // Locking. @@ -403,10 +412,13 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { defer o.mutex.Unlock() // prevErr is error which was saved in previous operation. - if o.prevErr != nil || o.isClosed { + if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed { return 0, o.prevErr } + // Set the current offset to ReadAt offset, because the current offset will be shifted at the end of this method. + o.currOffset = offset + // Can only compare offsets to size when size has been set. if o.objectInfoSet { // If offset is negative than we return io.EOF. @@ -469,23 +481,21 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { // underlying object is not closed. func (o *Object) Seek(offset int64, whence int) (n int64, err error) { if o == nil { - return 0, ErrInvalidArgument("Object is nil") + return 0, errInvalidArgument("Object is nil") } // Locking. o.mutex.Lock() defer o.mutex.Unlock() - if o.prevErr != nil { - // At EOF seeking is legal allow only io.EOF, for any other errors we return. - if o.prevErr != io.EOF { - return 0, o.prevErr - } + // At EOF seeking is legal allow only io.EOF, for any other errors we return. 
+ if o.prevErr != nil && o.prevErr != io.EOF { + return 0, o.prevErr } // Negative offset is valid for whence of '2'. if offset < 0 && whence != 2 { - return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence)) + return 0, errInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence)) } // This is the first request. So before anything else @@ -509,7 +519,7 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) { // Switch through whence. switch whence { default: - return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence)) + return 0, errInvalidArgument(fmt.Sprintf("Invalid whence %d", whence)) case 0: if o.objectInfo.Size > -1 && offset > o.objectInfo.Size { return 0, io.EOF @@ -523,7 +533,7 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) { case 2: // If we don't know the object size return an error for io.SeekEnd if o.objectInfo.Size < 0 { - return 0, ErrInvalidArgument("Whence END is not supported when the object size is unknown") + return 0, errInvalidArgument("Whence END is not supported when the object size is unknown") } // Seeking to positive offset is valid for whence '2', but // since we are backing a Reader we have reached 'EOF' if @@ -533,7 +543,7 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) { } // Seeking to negative position not allowed for whence. if o.objectInfo.Size+offset < 0 { - return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence)) + return 0, errInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence)) } o.currOffset = o.objectInfo.Size + offset } @@ -554,7 +564,7 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) { // for subsequent Close() calls. func (o *Object) Close() (err error) { if o == nil { - return ErrInvalidArgument("Object is nil") + return errInvalidArgument("Object is nil") } // Locking. 
o.mutex.Lock() @@ -594,66 +604,43 @@ func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- // // For more information about the HTTP Range header. // go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. -func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) { +func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { // Validate input arguments. if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, ObjectInfo{}, err + return nil, ObjectInfo{}, nil, err } if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, ObjectInfo{}, err + return nil, ObjectInfo{}, nil, err + } + + urlValues := make(url.Values) + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) } // Execute GET on objectName. - resp, err := c.executeMethod(ctx, "GET", requestMetadata{ + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ bucketName: bucketName, objectName: objectName, + queryValues: urlValues, customHeader: opts.Header(), contentSHA256Hex: emptySHA256Hex, }) if err != nil { - return nil, ObjectInfo{}, err + return nil, ObjectInfo{}, nil, err } if resp != nil { if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { - return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) + return nil, ObjectInfo{}, nil, httpRespToErrorResponse(resp, bucketName, objectName) } } - // Trim off the odd double quotes from ETag in the beginning and end. - md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"") - md5sum = strings.TrimSuffix(md5sum, "\"") - - // Parse the date. 
- date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) + objectStat, err := ToObjectInfo(bucketName, objectName, resp.Header) if err != nil { - msg := "Last-Modified time format not recognized. " + reportIssue - return nil, ObjectInfo{}, ErrorResponse{ - Code: "InternalError", - Message: msg, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - Region: resp.Header.Get("x-amz-bucket-region"), - } - } - - // Get content-type. - contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) - if contentType == "" { - contentType = "application/octet-stream" - } - - objectStat := ObjectInfo{ - ETag: md5sum, - Key: objectName, - Size: resp.ContentLength, - LastModified: date, - ContentType: contentType, - // Extract only the relevant header keys describing the object. - // following function filters out a list of standard set of keys - // which are not part of object metadata. - Metadata: extractObjMetadata(resp.Header), + closeResponse(resp) + return nil, ObjectInfo{}, nil, err } // do not close body here, caller will close - return resp.Body, objectStat, nil + return resp.Body, objectStat, resp.Header, nil } diff --git a/api-get-options.go b/api-get-options.go index dbf062d61..5f3ed3656 100644 --- a/api-get-options.go +++ b/api-get-options.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -22,7 +22,7 @@ import ( "net/http" "time" - "github.com/minio/minio-go/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/encrypt" ) // GetObjectOptions are used to specify additional headers or options @@ -30,13 +30,12 @@ import ( type GetObjectOptions struct { headers map[string]string ServerSideEncryption encrypt.ServerSide + VersionID string } // StatObjectOptions are used to specify additional headers or options // during GET info/stat requests. -type StatObjectOptions struct { - GetObjectOptions -} +type StatObjectOptions = GetObjectOptions // Header returns the http.Header representation of the GET options. func (o GetObjectOptions) Header() http.Header { @@ -63,7 +62,7 @@ func (o *GetObjectOptions) Set(key, value string) { // SetMatchETag - set match etag. func (o *GetObjectOptions) SetMatchETag(etag string) error { if etag == "" { - return ErrInvalidArgument("ETag cannot be empty.") + return errInvalidArgument("ETag cannot be empty.") } o.Set("If-Match", "\""+etag+"\"") return nil @@ -72,7 +71,7 @@ func (o *GetObjectOptions) SetMatchETag(etag string) error { // SetMatchETagExcept - set match etag except. func (o *GetObjectOptions) SetMatchETagExcept(etag string) error { if etag == "" { - return ErrInvalidArgument("ETag cannot be empty.") + return errInvalidArgument("ETag cannot be empty.") } o.Set("If-None-Match", "\""+etag+"\"") return nil @@ -81,7 +80,7 @@ func (o *GetObjectOptions) SetMatchETagExcept(etag string) error { // SetUnmodified - set unmodified time since. func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error { if modTime.IsZero() { - return ErrInvalidArgument("Modified since cannot be empty.") + return errInvalidArgument("Modified since cannot be empty.") } o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat)) return nil @@ -90,7 +89,7 @@ func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error { // SetModified - set modified time since. 
func (o *GetObjectOptions) SetModified(modTime time.Time) error { if modTime.IsZero() { - return ErrInvalidArgument("Modified since cannot be empty.") + return errInvalidArgument("Modified since cannot be empty.") } o.Set("If-Modified-Since", modTime.Format(http.TimeFormat)) return nil @@ -119,7 +118,7 @@ func (o *GetObjectOptions) SetRange(start, end int64) error { // bytes=-3-0 // bytes=-3--2 // are invalid. - return ErrInvalidArgument( + return errInvalidArgument( fmt.Sprintf( "Invalid range specified: start=%d end=%d", start, end)) diff --git a/api-get-policy.go b/api-get-policy.go deleted file mode 100644 index 12d4c590e..000000000 --- a/api-get-policy.go +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io/ioutil" - "net/http" - "net/url" - - "github.com/minio/minio-go/pkg/s3utils" -) - -// GetBucketPolicy - get bucket policy at a given path. -func (c Client) GetBucketPolicy(bucketName string) (string, error) { - // Input validation. 
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - bucketPolicy, err := c.getBucketPolicy(bucketName) - if err != nil { - errResponse := ToErrorResponse(err) - if errResponse.Code == "NoSuchBucketPolicy" { - return "", nil - } - return "", err - } - return bucketPolicy, nil -} - -// Request server for current bucket policy. -func (c Client) getBucketPolicy(bucketName string) (string, error) { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("policy", "") - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - - defer closeResponse(resp) - if err != nil { - return "", err - } - - if resp != nil { - if resp.StatusCode != http.StatusOK { - return "", httpRespToErrorResponse(resp, bucketName, "") - } - } - - bucketPolicyBuf, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - - policy := string(bucketPolicyBuf) - return policy, err -} diff --git a/api-list.go b/api-list.go index 2f1350a34..7996c11e9 100644 --- a/api-list.go +++ b/api-list.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,13 +19,11 @@ package minio import ( "context" - "errors" "fmt" "net/http" "net/url" - "strings" - "github.com/minio/minio-go/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/s3utils" ) // ListBuckets list all buckets owned by this authenticated user. @@ -34,13 +32,13 @@ import ( // allowed for listing buckets. // // api := client.New(....) 
-// for message := range api.ListBuckets() { +// for message := range api.ListBuckets(context.Background()) { // fmt.Println(message) // } // -func (c Client) ListBuckets() ([]BucketInfo, error) { +func (c Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) { // Execute GET on service. - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{contentSHA256Hex: emptySHA256Hex}) + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex}) defer closeResponse(resp) if err != nil { return nil, err @@ -60,27 +58,7 @@ func (c Client) ListBuckets() ([]BucketInfo, error) { /// Bucket Read Operations. -// ListObjectsV2 lists all objects matching the objectPrefix from -// the specified bucket. If recursion is enabled it would list -// all subdirectories and all its contents. -// -// Your input parameters are just bucketName, objectPrefix, recursive -// and a done channel for pro-actively closing the internal go -// routine. If you enable recursive as 'true' this function will -// return back all the objects in a given bucket name and object -// prefix. -// -// api := client.New(....) -// // Create a done channel. -// doneCh := make(chan struct{}) -// defer close(doneCh) -// // Recursively list all objects in 'mytestbucket' -// recursive := true -// for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) { -// fmt.Println(message) -// } -// -func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo { +func (c Client) listObjectsV2(ctx context.Context, bucketName, objectPrefix string, recursive, metadata bool, maxKeys int) <-chan ObjectInfo { // Allocate new list objects channel. 
objectStatCh := make(chan ObjectInfo, 1) // Default listing is delimited at "/" @@ -118,7 +96,8 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d var continuationToken string for { // Get list of objects a maximum of 1000 per request. - result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, 1000, "") + result, err := c.listObjectsV2Query(ctx, bucketName, objectPrefix, continuationToken, + fetchOwner, metadata, delimiter, maxKeys) if err != nil { objectStatCh <- ObjectInfo{ Err: err, @@ -128,11 +107,12 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d // If contents are available loop through and send over channel. for _, object := range result.Contents { + object.ETag = trimEtag(object.ETag) select { // Send object content. case objectStatCh <- object: // If receives done from the caller, return here. - case <-doneCh: + case <-ctx.Done(): return } } @@ -142,12 +122,9 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d for _, obj := range result.CommonPrefixes { select { // Send object prefixes. - case objectStatCh <- ObjectInfo{ - Key: obj.Prefix, - Size: 0, - }: + case objectStatCh <- ObjectInfo{Key: obj.Prefix}: // If receives done from the caller, return here. - case <-doneCh: + case <-ctx.Done(): return } } @@ -175,8 +152,8 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d // ?delimiter - A delimiter is a character you use to group keys. // ?prefix - Limits the response to keys that begin with the specified prefix. // ?max-keys - Sets the maximum number of keys returned in the response body. -// ?start-after - Specifies the key to start after when listing objects in a bucket. 
-func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int, startAfter string) (ListBucketV2Result, error) { +// ?metadata - Specifies if we want metadata for the objects as part of list operation. +func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, maxkeys int) (ListBucketV2Result, error) { // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ListBucketV2Result{}, err @@ -192,6 +169,13 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken s // Always set list-type in ListObjects V2 urlValues.Set("list-type", "2") + if metadata { + urlValues.Set("metadata", "true") + } + + // Always set encoding-type in ListObjects V2 + urlValues.Set("encoding-type", "url") + // Set object prefix, prefix value to be set to empty is okay. urlValues.Set("prefix", objectPrefix) @@ -208,20 +192,13 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken s urlValues.Set("fetch-owner", "true") } - // maxkeys should default to 1000 or less. - if maxkeys == 0 || maxkeys > 1000 { - maxkeys = 1000 - } // Set max keys. - urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) - - // Set start-after - if startAfter != "" { - urlValues.Set("start-after", startAfter) + if maxkeys > 0 { + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) } // Execute GET on bucket to list objects. - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ bucketName: bucketName, queryValues: urlValues, contentSHA256Hex: emptySHA256Hex, @@ -245,36 +222,31 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken s // This is an additional verification check to make // sure proper responses are received. 
if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" { - return listBucketResult, errors.New("Truncated response should have continuation token set") + return listBucketResult, ErrorResponse{ + Code: "NotImplemented", + Message: "Truncated response should have continuation token set", + } + } + + for i, obj := range listBucketResult.Contents { + listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType) + if err != nil { + return listBucketResult, err + } + } + + for i, obj := range listBucketResult.CommonPrefixes { + listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType) + if err != nil { + return listBucketResult, err + } } // Success. return listBucketResult, nil } -// ListObjects - (List Objects) - List some objects or all recursively. -// -// ListObjects lists all objects matching the objectPrefix from -// the specified bucket. If recursion is enabled it would list -// all subdirectories and all its contents. -// -// Your input parameters are just bucketName, objectPrefix, recursive -// and a done channel for pro-actively closing the internal go -// routine. If you enable recursive as 'true' this function will -// return back all the objects in a given bucket name and object -// prefix. -// -// api := client.New(....) -// // Create a done channel. -// doneCh := make(chan struct{}) -// defer close(doneCh) -// // Recurively list all objects in 'mytestbucket' -// recursive := true -// for message := range api.ListObjects("mytestbucket", "starthere", recursive, doneCh) { -// fmt.Println(message) -// } -// -func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo { +func (c Client) listObjects(ctx context.Context, bucketName, objectPrefix string, recursive bool, maxKeys int) <-chan ObjectInfo { // Allocate new list objects channel. 
objectStatCh := make(chan ObjectInfo, 1) // Default listing is delimited at "/" @@ -303,11 +275,11 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don // Initiate list objects goroutine here. go func(objectStatCh chan<- ObjectInfo) { defer close(objectStatCh) - // Save marker for next request. - var marker string + + marker := "" for { // Get list of objects a maximum of 1000 per request. - result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 1000) + result, err := c.listObjectsQuery(ctx, bucketName, objectPrefix, marker, delimiter, maxKeys) if err != nil { objectStatCh <- ObjectInfo{ Err: err, @@ -323,7 +295,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don // Send object content. case objectStatCh <- object: // If receives done from the caller, return here. - case <-doneCh: + case <-ctx.Done(): return } } @@ -331,14 +303,11 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don // Send all common prefixes if any. // NOTE: prefixes are only present if the request is delimited. for _, obj := range result.CommonPrefixes { - object := ObjectInfo{} - object.Key = obj.Prefix - object.Size = 0 select { // Send object prefixes. - case objectStatCh <- object: + case objectStatCh <- ObjectInfo{Key: obj.Prefix}: // If receives done from the caller, return here. - case <-doneCh: + case <-ctx.Done(): return } } @@ -357,6 +326,205 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don return objectStatCh } +func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix string, recursive bool, maxKeys int) <-chan ObjectInfo { + // Allocate new list objects channel. + resultCh := make(chan ObjectInfo, 1) + // Default listing is delimited at "/" + delimiter := "/" + if recursive { + // If recursive we do not delimit. + delimiter = "" + } + + // Validate bucket name. 
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(resultCh) + resultCh <- ObjectInfo{ + Err: err, + } + return resultCh + } + + // Validate incoming object prefix. + if err := s3utils.CheckValidObjectNamePrefix(prefix); err != nil { + defer close(resultCh) + resultCh <- ObjectInfo{ + Err: err, + } + return resultCh + } + + // Initiate list objects goroutine here. + go func(resultCh chan<- ObjectInfo) { + defer close(resultCh) + + var ( + keyMarker = "" + versionIDMarker = "" + ) + + for { + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectVersionsQuery(ctx, bucketName, prefix, keyMarker, versionIDMarker, delimiter, maxKeys) + if err != nil { + resultCh <- ObjectInfo{ + Err: err, + } + return + } + + // If contents are available loop through and send over channel. + for _, version := range result.Versions { + info := ObjectInfo{ + ETag: trimEtag(version.ETag), + Key: version.Key, + LastModified: version.LastModified, + Size: version.Size, + Owner: version.Owner, + StorageClass: version.StorageClass, + IsLatest: version.IsLatest, + VersionID: version.VersionID, + + IsDeleteMarker: version.isDeleteMarker, + } + select { + // Send object version info. + case resultCh <- info: + // If receives done from the caller, return here. + case <-ctx.Done(): + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + select { + // Send object prefixes. + case resultCh <- ObjectInfo{Key: obj.Prefix}: + // If receives done from the caller, return here. + case <-ctx.Done(): + return + } + } + + // If next key marker is present, save it for next request. + if result.NextKeyMarker != "" { + keyMarker = result.NextKeyMarker + } + + // If next version id marker is present, save it for next request. 
+ if result.NextVersionIDMarker != "" { + versionIDMarker = result.NextVersionIDMarker + } + + // Listing ends result is not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(resultCh) + return resultCh +} + +// listObjectVersions - (List Object Versions) - List some or all (up to 1000) of the existing objects +// and their versions in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. +// request parameters :- +// --------- +// ?key-marker - Specifies the key to start with when listing objects in a bucket. +// ?version-id-marker - Specifies the version id marker to start with when listing objects with versions in a bucket. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-keys - Sets the maximum number of keys returned in the response body. +func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, keyMarker, versionIDMarker, delimiter string, maxkeys int) (ListVersionsResult, error) { + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ListVersionsResult{}, err + } + // Validate object prefix. + if err := s3utils.CheckValidObjectNamePrefix(prefix); err != nil { + return ListVersionsResult{}, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + + // Set versions to trigger versioning API + urlValues.Set("versions", "") + + // Set object prefix, prefix value to be set to empty is okay. + urlValues.Set("prefix", prefix) + + // Set delimiter, delimiter value to be set to empty is okay. + urlValues.Set("delimiter", delimiter) + + // Set object marker. + if keyMarker != "" { + urlValues.Set("key-marker", keyMarker) + } + + // Set max keys. 
+ if maxkeys > 0 { + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + } + + // Set version ID marker + if versionIDMarker != "" { + urlValues.Set("version-id-marker", versionIDMarker) + } + + // Always set encoding-type + urlValues.Set("encoding-type", "url") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return ListVersionsResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListVersionsResult{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Decode ListVersionsResult XML. + listObjectVersionsOutput := ListVersionsResult{} + err = xmlDecoder(resp.Body, &listObjectVersionsOutput) + if err != nil { + return ListVersionsResult{}, err + } + + for i, obj := range listObjectVersionsOutput.Versions { + listObjectVersionsOutput.Versions[i].Key, err = decodeS3Name(obj.Key, listObjectVersionsOutput.EncodingType) + if err != nil { + return listObjectVersionsOutput, err + } + } + + for i, obj := range listObjectVersionsOutput.CommonPrefixes { + listObjectVersionsOutput.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listObjectVersionsOutput.EncodingType) + if err != nil { + return listObjectVersionsOutput, err + } + } + + if listObjectVersionsOutput.NextKeyMarker != "" { + listObjectVersionsOutput.NextKeyMarker, err = decodeS3Name(listObjectVersionsOutput.NextKeyMarker, listObjectVersionsOutput.EncodingType) + if err != nil { + return listObjectVersionsOutput, err + } + } + + return listObjectVersionsOutput, nil +} + // listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket. // // You can use the request parameters as selection criteria to return a subset of the objects in a bucket. 
@@ -366,7 +534,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don // ?delimiter - A delimiter is a character you use to group keys. // ?prefix - Limits the response to keys that begin with the specified prefix. // ?max-keys - Sets the maximum number of keys returned in the response body. -func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) { +func (c Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) { // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ListBucketResult{}, err @@ -390,15 +558,16 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit urlValues.Set("marker", objectMarker) } - // maxkeys should default to 1000 or less. - if maxkeys == 0 || maxkeys > 1000 { - maxkeys = 1000 - } // Set max keys. - urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + if maxkeys > 0 { + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + } + + // Always set encoding-type + urlValues.Set("encoding-type", "url") // Execute GET on bucket to list objects. 
- resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ bucketName: bucketName, queryValues: urlValues, contentSHA256Hex: emptySHA256Hex, @@ -418,38 +587,99 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit if err != nil { return listBucketResult, err } + + for i, obj := range listBucketResult.Contents { + listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType) + if err != nil { + return listBucketResult, err + } + } + + for i, obj := range listBucketResult.CommonPrefixes { + listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType) + if err != nil { + return listBucketResult, err + } + } + + if listBucketResult.NextMarker != "" { + listBucketResult.NextMarker, err = decodeS3Name(listBucketResult.NextMarker, listBucketResult.EncodingType) + if err != nil { + return listBucketResult, err + } + } + return listBucketResult, nil } +// ListObjectsOptions holds all options of a list object request +type ListObjectsOptions struct { + // Include objects versions in the listing + WithVersions bool + // Include objects metadata in the listing + WithMetadata bool + // Only list objects with the prefix + Prefix string + // Ignore '/' delimiter + Recursive bool + // The maximum number of objects requested per + // batch, advanced use-case not useful for most + // applications + MaxKeys int + + // Use the deprecated list objects V1 API + UseV1 bool +} + +// ListObjects returns objects list after evaluating the passed options. +// +// api := client.New(....) 
+// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) { +// fmt.Println(object) +// } +// +func (c Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { + if opts.WithVersions { + return c.listObjectVersions(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys) + } + + // Use legacy list objects v1 API + if opts.UseV1 { + return c.listObjects(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys) + } + + // Check whether this is snowball region, if yes ListObjectsV2 doesn't work, fallback to listObjectsV1. + if location, ok := c.bucketLocCache.Get(bucketName); ok { + if location == "snowball" { + return c.listObjects(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys) + } + } + + return c.listObjectsV2(ctx, bucketName, opts.Prefix, opts.Recursive, opts.WithMetadata, opts.MaxKeys) +} + // ListIncompleteUploads - List incompletely uploaded multipart objects. // // ListIncompleteUploads lists all incompleted objects matching the // objectPrefix from the specified bucket. If recursion is enabled // it would list all subdirectories and all its contents. // -// Your input parameters are just bucketName, objectPrefix, recursive -// and a done channel to pro-actively close the internal go routine. +// Your input parameters are just bucketName, objectPrefix, recursive. // If you enable recursive as 'true' this function will return back all // the multipart objects in a given bucket name. // // api := client.New(....) -// // Create a done channel. 
-// doneCh := make(chan struct{}) -// defer close(doneCh) // // Recurively list all objects in 'mytestbucket' // recursive := true -// for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive) { +// for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) { // fmt.Println(message) // } -// -func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo { - // Turn on size aggregation of individual parts. - isAggregateSize := true - return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize, doneCh) +func (c Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo { + return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive) } // listIncompleteUploads lists all incomplete uploads. -func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo { +func (c Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo { // Allocate channel for multipart uploads. objectMultipartStatCh := make(chan ObjectMultipartInfo, 1) // Delimiter is set to "/" by default. @@ -481,48 +711,35 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive var uploadIDMarker string for { // list all multipart uploads. - result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000) + result, err := c.listMultipartUploadsQuery(ctx, bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 0) if err != nil { objectMultipartStatCh <- ObjectMultipartInfo{ Err: err, } return } - // Save objectMarker and uploadIDMarker for next request. 
objectMarker = result.NextKeyMarker uploadIDMarker = result.NextUploadIDMarker + // Send all multipart uploads. for _, obj := range result.Uploads { // Calculate total size of the uploaded parts if 'aggregateSize' is enabled. - if aggregateSize { - // Get total multipart size. - obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID) - if err != nil { - objectMultipartStatCh <- ObjectMultipartInfo{ - Err: err, - } - continue - } - } select { // Send individual uploads here. case objectMultipartStatCh <- obj: - // If done channel return here. - case <-doneCh: + // If the context is canceled + case <-ctx.Done(): return } } // Send all common prefixes if any. // NOTE: prefixes are only present if the request is delimited. for _, obj := range result.CommonPrefixes { - object := ObjectMultipartInfo{} - object.Key = obj.Prefix - object.Size = 0 select { // Send delimited prefixes here. - case objectMultipartStatCh <- object: - // If done channel return here. - case <-doneCh: + case objectMultipartStatCh <- ObjectMultipartInfo{Key: obj.Prefix, Size: 0}: + // If context is canceled. + case <-ctx.Done(): return } } @@ -534,9 +751,10 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive }(objectMultipartStatCh) // return. return objectMultipartStatCh + } -// listMultipartUploads - (List Multipart Uploads). +// listMultipartUploadsQuery - (List Multipart Uploads). // - Lists some or all (up to 1000) in-progress multipart uploads in a bucket. // // You can use the request parameters as selection criteria to return a subset of the uploads in a bucket. @@ -547,7 +765,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive // ?delimiter - A delimiter is a character you use to group keys. // ?prefix - Limits the response to keys that begin with the specified prefix. // ?max-uploads - Sets the maximum number of multipart uploads returned in the response body. 
-func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) { +func (c Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) { // Get resources properly escaped and lined up before using them in http request. urlValues := make(url.Values) // Set uploads. @@ -567,15 +785,17 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, // Set delimiter, delimiter value to be set to empty is okay. urlValues.Set("delimiter", delimiter) + // Always set encoding-type + urlValues.Set("encoding-type", "url") + // maxUploads should be 1000 or less. - if maxUploads == 0 || maxUploads > 1000 { - maxUploads = 1000 + if maxUploads > 0 { + // Set max-uploads. + urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) } - // Set max-uploads. - urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) // Execute GET on bucketName to list multipart uploads. 
- resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ bucketName: bucketName, queryValues: urlValues, contentSHA256Hex: emptySHA256Hex, @@ -595,25 +815,49 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, if err != nil { return listMultipartUploadsResult, err } + + listMultipartUploadsResult.NextKeyMarker, err = decodeS3Name(listMultipartUploadsResult.NextKeyMarker, listMultipartUploadsResult.EncodingType) + if err != nil { + return listMultipartUploadsResult, err + } + + listMultipartUploadsResult.NextUploadIDMarker, err = decodeS3Name(listMultipartUploadsResult.NextUploadIDMarker, listMultipartUploadsResult.EncodingType) + if err != nil { + return listMultipartUploadsResult, err + } + + for i, obj := range listMultipartUploadsResult.Uploads { + listMultipartUploadsResult.Uploads[i].Key, err = decodeS3Name(obj.Key, listMultipartUploadsResult.EncodingType) + if err != nil { + return listMultipartUploadsResult, err + } + } + + for i, obj := range listMultipartUploadsResult.CommonPrefixes { + listMultipartUploadsResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listMultipartUploadsResult.EncodingType) + if err != nil { + return listMultipartUploadsResult, err + } + } + return listMultipartUploadsResult, nil } // listObjectParts list all object parts recursively. -func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) { +func (c Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) { // Part number marker for the next batch of request. var nextPartNumberMarker int partsInfo = make(map[int]ObjectPart) for { // Get list of uploaded parts a maximum of 1000 per request. 
- listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000) + listObjPartsResult, err := c.listObjectPartsQuery(ctx, bucketName, objectName, uploadID, nextPartNumberMarker, 1000) if err != nil { return nil, err } // Append to parts info. for _, part := range listObjPartsResult.ObjectParts { // Trim off the odd double quotes from ETag in the beginning and end. - part.ETag = strings.TrimPrefix(part.ETag, "\"") - part.ETag = strings.TrimSuffix(part.ETag, "\"") + part.ETag = trimEtag(part.ETag) partsInfo[part.PartNumber] = part } // Keep part number marker, for the next iteration. @@ -629,17 +873,12 @@ func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsI } // findUploadIDs lists all incomplete uploads and find the uploadIDs of the matching object name. -func (c Client) findUploadIDs(bucketName, objectName string) ([]string, error) { +func (c Client) findUploadIDs(ctx context.Context, bucketName, objectName string) ([]string, error) { var uploadIDs []string // Make list incomplete uploads recursive. isRecursive := true - // Turn off size aggregation of individual parts, in this request. - isAggregateSize := false - // Create done channel to cleanup the routine. - doneCh := make(chan struct{}) - defer close(doneCh) // List all incomplete uploads. - for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, doneCh) { + for mpUpload := range c.listIncompleteUploads(ctx, bucketName, objectName, isRecursive) { if mpUpload.Err != nil { return nil, mpUpload.Err } @@ -651,19 +890,6 @@ func (c Client) findUploadIDs(bucketName, objectName string) ([]string, error) { return uploadIDs, nil } -// getTotalMultipartSize - calculate total uploaded size for the a given multipart object. -func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (size int64, err error) { - // Iterate over all parts and aggregate the size. 
- partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID) - if err != nil { - return 0, err - } - for _, partInfo := range partsInfo { - size += partInfo.Size - } - return size, nil -} - // listObjectPartsQuery (List Parts query) // - lists some or all (up to 1000) parts that have been uploaded // for a specific multipart upload @@ -674,7 +900,7 @@ func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) ( // ?part-number-marker - Specifies the part after which listing should // begin. // ?max-parts - Maximum parts to be listed per request. -func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) { +func (c Client) listObjectPartsQuery(ctx context.Context, bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) { // Get resources properly escaped and lined up before using them in http request. urlValues := make(url.Values) // Set part number marker. @@ -683,14 +909,13 @@ func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, pa urlValues.Set("uploadId", uploadID) // maxParts should be 1000 or less. - if maxParts == 0 || maxParts > 1000 { - maxParts = 1000 + if maxParts > 0 { + // Set max parts. + urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts)) } - // Set max parts. - urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts)) // Execute GET on objectName to get list of parts. 
- resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ bucketName: bucketName, objectName: objectName, queryValues: urlValues, @@ -713,3 +938,13 @@ func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, pa } return listObjectPartsResult, nil } + +// Decode an S3 object name according to the encoding type +func decodeS3Name(name, encodingType string) (string, error) { + switch encodingType { + case "url": + return url.QueryUnescape(name) + default: + return name, nil + } +} diff --git a/api-notification.go b/api-notification.go deleted file mode 100644 index 1c01e362b..000000000 --- a/api-notification.go +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bufio" - "context" - "encoding/json" - "io" - "net/http" - "net/url" - "time" - - "github.com/minio/minio-go/pkg/s3utils" -) - -// GetBucketNotification - get bucket notification at a given path. -func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) { - // Input validation. 
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return BucketNotification{}, err - } - notification, err := c.getBucketNotification(bucketName) - if err != nil { - return BucketNotification{}, err - } - return notification, nil -} - -// Request server for notification rules. -func (c Client) getBucketNotification(bucketName string) (BucketNotification, error) { - urlValues := make(url.Values) - urlValues.Set("notification", "") - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - - defer closeResponse(resp) - if err != nil { - return BucketNotification{}, err - } - return processBucketNotificationResponse(bucketName, resp) - -} - -// processes the GetNotification http response from the server. -func processBucketNotificationResponse(bucketName string, resp *http.Response) (BucketNotification, error) { - if resp.StatusCode != http.StatusOK { - errResponse := httpRespToErrorResponse(resp, bucketName, "") - return BucketNotification{}, errResponse - } - var bucketNotification BucketNotification - err := xmlDecoder(resp.Body, &bucketNotification) - if err != nil { - return BucketNotification{}, err - } - return bucketNotification, nil -} - -// Indentity represents the user id, this is a compliance field. -type identity struct { - PrincipalID string `json:"principalId"` -} - -// Notification event bucket metadata. -type bucketMeta struct { - Name string `json:"name"` - OwnerIdentity identity `json:"ownerIdentity"` - ARN string `json:"arn"` -} - -// Notification event object metadata. -type objectMeta struct { - Key string `json:"key"` - Size int64 `json:"size,omitempty"` - ETag string `json:"eTag,omitempty"` - VersionID string `json:"versionId,omitempty"` - Sequencer string `json:"sequencer"` -} - -// Notification event server specific metadata. 
-type eventMeta struct { - SchemaVersion string `json:"s3SchemaVersion"` - ConfigurationID string `json:"configurationId"` - Bucket bucketMeta `json:"bucket"` - Object objectMeta `json:"object"` -} - -// sourceInfo represents information on the client that -// triggered the event notification. -type sourceInfo struct { - Host string `json:"host"` - Port string `json:"port"` - UserAgent string `json:"userAgent"` -} - -// NotificationEvent represents an Amazon an S3 bucket notification event. -type NotificationEvent struct { - EventVersion string `json:"eventVersion"` - EventSource string `json:"eventSource"` - AwsRegion string `json:"awsRegion"` - EventTime string `json:"eventTime"` - EventName string `json:"eventName"` - UserIdentity identity `json:"userIdentity"` - RequestParameters map[string]string `json:"requestParameters"` - ResponseElements map[string]string `json:"responseElements"` - S3 eventMeta `json:"s3"` - Source sourceInfo `json:"source"` -} - -// NotificationInfo - represents the collection of notification events, additionally -// also reports errors if any while listening on bucket notifications. -type NotificationInfo struct { - Records []NotificationEvent - Err error -} - -// ListenBucketNotification - listen on bucket notifications. -func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo { - notificationInfoCh := make(chan NotificationInfo, 1) - // Only success, start a routine to start reading line by line. - go func(notificationInfoCh chan<- NotificationInfo) { - defer close(notificationInfoCh) - - // Validate the bucket name. 
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - notificationInfoCh <- NotificationInfo{ - Err: err, - } - return - } - - // Check ARN partition to verify if listening bucket is supported - if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) { - notificationInfoCh <- NotificationInfo{ - Err: ErrAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"), - } - return - } - - // Continuously run and listen on bucket notification. - // Create a done channel to control 'ListObjects' go routine. - retryDoneCh := make(chan struct{}, 1) - - // Indicate to our routine to exit cleanly upon return. - defer close(retryDoneCh) - - // Wait on the jitter retry loop. - for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) { - urlValues := make(url.Values) - urlValues.Set("prefix", prefix) - urlValues.Set("suffix", suffix) - urlValues["events"] = events - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - if err != nil { - notificationInfoCh <- NotificationInfo{ - Err: err, - } - return - } - - // Validate http response, upon error return quickly. - if resp.StatusCode != http.StatusOK { - errResponse := httpRespToErrorResponse(resp, bucketName, "") - notificationInfoCh <- NotificationInfo{ - Err: errResponse, - } - return - } - - // Initialize a new bufio scanner, to read line by line. - bio := bufio.NewScanner(resp.Body) - - // Close the response body. - defer resp.Body.Close() - - // Unmarshal each line, returns marshalled values. 
- for bio.Scan() { - var notificationInfo NotificationInfo - if err = json.Unmarshal(bio.Bytes(), ¬ificationInfo); err != nil { - continue - } - // Send notificationInfo - select { - case notificationInfoCh <- notificationInfo: - case <-doneCh: - return - } - } - // Look for any underlying errors. - if err = bio.Err(); err != nil { - // For an unexpected connection drop from server, we close the body - // and re-connect. - if err == io.ErrUnexpectedEOF { - resp.Body.Close() - } - } - } - }(notificationInfoCh) - - // Returns the notification info channel, for caller to start reading from. - return notificationInfoCh -} diff --git a/api-object-legal-hold.go b/api-object-legal-hold.go new file mode 100644 index 000000000..b139c1687 --- /dev/null +++ b/api-object-legal-hold.go @@ -0,0 +1,176 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// objectLegalHold - object legal hold specified in +// https://docs.aws.amazon.com/AmazonS3/latest/API/archive-RESTObjectPUTLegalHold.html +type objectLegalHold struct { + XMLNS string `xml:"xmlns,attr,omitempty"` + XMLName xml.Name `xml:"LegalHold"` + Status LegalHoldStatus `xml:"Status,omitempty"` +} + +// PutObjectLegalHoldOptions represents options specified by user for PutObjectLegalHold call +type PutObjectLegalHoldOptions struct { + VersionID string + Status *LegalHoldStatus +} + +// GetObjectLegalHoldOptions represents options specified by user for GetObjectLegalHold call +type GetObjectLegalHoldOptions struct { + VersionID string +} + +// LegalHoldStatus - object legal hold status. +type LegalHoldStatus string + +const ( + // LegalHoldEnabled indicates legal hold is enabled + LegalHoldEnabled LegalHoldStatus = "ON" + + // LegalHoldDisabled indicates legal hold is disabled + LegalHoldDisabled LegalHoldStatus = "OFF" +) + +func (r LegalHoldStatus) String() string { + return string(r) +} + +// IsValid - check whether this legal hold status is valid or not. +func (r LegalHoldStatus) IsValid() bool { + return r == LegalHoldEnabled || r == LegalHoldDisabled +} + +func newObjectLegalHold(status *LegalHoldStatus) (*objectLegalHold, error) { + if status == nil { + return nil, fmt.Errorf("Status not set") + } + if !status.IsValid() { + return nil, fmt.Errorf("invalid legal hold status `%v`", status) + } + legalHold := &objectLegalHold{ + Status: *status, + } + return legalHold, nil +} + +// PutObjectLegalHold : sets object legal hold for a given object and versionID. +func (c Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts PutObjectLegalHoldOptions) error { + // Input validation. 
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("legal-hold", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + lh, err := newObjectLegalHold(opts.Status) + if err != nil { + return err + } + + lhData, err := xml.Marshal(lh) + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: bytes.NewReader(lhData), + contentLength: int64(len(lhData)), + contentMD5Base64: sumMD5Base64(lhData), + contentSHA256Hex: sum256Hex(lhData), + } + + // Execute PUT Object Legal Hold. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, objectName) + } + } + return nil +} + +// GetObjectLegalHold gets legal-hold status of given object. +func (c Client) GetObjectLegalHold(ctx context.Context, bucketName, objectName string, opts GetObjectLegalHoldOptions) (status *LegalHoldStatus, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + urlValues := make(url.Values) + urlValues.Set("legal-hold", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + // Execute GET on bucket to list objects. 
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + lh := &objectLegalHold{} + if err = xml.NewDecoder(resp.Body).Decode(lh); err != nil { + return nil, err + } + + return &lh.Status, nil +} diff --git a/api-object-lock.go b/api-object-lock.go new file mode 100644 index 000000000..29f52b054 --- /dev/null +++ b/api-object-lock.go @@ -0,0 +1,241 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// RetentionMode - object retention mode. +type RetentionMode string + +const ( + // Governance - governance mode. + Governance RetentionMode = "GOVERNANCE" + + // Compliance - compliance mode. + Compliance RetentionMode = "COMPLIANCE" +) + +func (r RetentionMode) String() string { + return string(r) +} + +// IsValid - check whether this retention mode is valid or not. 
+func (r RetentionMode) IsValid() bool { + return r == Governance || r == Compliance +} + +// ValidityUnit - retention validity unit. +type ValidityUnit string + +const ( + // Days - denotes no. of days. + Days ValidityUnit = "DAYS" + + // Years - denotes no. of years. + Years ValidityUnit = "YEARS" +) + +func (unit ValidityUnit) String() string { + return string(unit) +} + +// IsValid - check whether this validity unit is valid or not. +func (unit ValidityUnit) isValid() bool { + return unit == Days || unit == Years +} + +// Retention - bucket level retention configuration. +type Retention struct { + Mode RetentionMode + Validity time.Duration +} + +func (r Retention) String() string { + return fmt.Sprintf("{Mode:%v, Validity:%v}", r.Mode, r.Validity) +} + +// IsEmpty - returns whether retention is empty or not. +func (r Retention) IsEmpty() bool { + return r.Mode == "" || r.Validity == 0 +} + +// objectLockConfig - object lock configuration specified in +// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html +type objectLockConfig struct { + XMLNS string `xml:"xmlns,attr,omitempty"` + XMLName xml.Name `xml:"ObjectLockConfiguration"` + ObjectLockEnabled string `xml:"ObjectLockEnabled"` + Rule *struct { + DefaultRetention struct { + Mode RetentionMode `xml:"Mode"` + Days *uint `xml:"Days"` + Years *uint `xml:"Years"` + } `xml:"DefaultRetention"` + } `xml:"Rule,omitempty"` +} + +func newObjectLockConfig(mode *RetentionMode, validity *uint, unit *ValidityUnit) (*objectLockConfig, error) { + config := &objectLockConfig{ + ObjectLockEnabled: "Enabled", + } + + if mode != nil && validity != nil && unit != nil { + if !mode.IsValid() { + return nil, fmt.Errorf("invalid retention mode `%v`", mode) + } + + if !unit.isValid() { + return nil, fmt.Errorf("invalid validity unit `%v`", unit) + } + + config.Rule = &struct { + DefaultRetention struct { + Mode RetentionMode `xml:"Mode"` + Days *uint `xml:"Days"` + Years *uint `xml:"Years"` + } 
`xml:"DefaultRetention"` + }{} + + config.Rule.DefaultRetention.Mode = *mode + if *unit == Days { + config.Rule.DefaultRetention.Days = validity + } else { + config.Rule.DefaultRetention.Years = validity + } + + return config, nil + } + + if mode == nil && validity == nil && unit == nil { + return config, nil + } + + return nil, fmt.Errorf("all of retention mode, validity and validity unit must be passed") +} + +// SetBucketObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. +func (c Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("object-lock", "") + + config, err := newObjectLockConfig(mode, validity, unit) + if err != nil { + return err + } + + configData, err := xml.Marshal(config) + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(configData), + contentLength: int64(len(configData)), + contentMD5Base64: sumMD5Base64(configData), + contentSHA256Hex: sum256Hex(configData), + } + + // Execute PUT bucket object lock configuration. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// GetObjectLockConfig gets object lock configuration of given bucket. +func (c Client) GetObjectLockConfig(ctx context.Context, bucketName string) (objectLock string, mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { + // Input validation. 
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", nil, nil, nil, err + } + + urlValues := make(url.Values) + urlValues.Set("object-lock", "") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return "", nil, nil, nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return "", nil, nil, nil, httpRespToErrorResponse(resp, bucketName, "") + } + } + config := &objectLockConfig{} + if err = xml.NewDecoder(resp.Body).Decode(config); err != nil { + return "", nil, nil, nil, err + } + + if config.Rule != nil { + mode = &config.Rule.DefaultRetention.Mode + if config.Rule.DefaultRetention.Days != nil { + validity = config.Rule.DefaultRetention.Days + days := Days + unit = &days + } else { + validity = config.Rule.DefaultRetention.Years + years := Years + unit = &years + } + return config.ObjectLockEnabled, mode, validity, unit, nil + } + return config.ObjectLockEnabled, nil, nil, nil, nil +} + +// GetBucketObjectLockConfig gets object lock configuration of given bucket. +func (c Client) GetBucketObjectLockConfig(ctx context.Context, bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { + _, mode, validity, unit, err = c.GetObjectLockConfig(ctx, bucketName) + return mode, validity, unit, err +} + +// SetObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. 
+func (c Client) SetObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { + return c.SetBucketObjectLockConfig(ctx, bucketName, mode, validity, unit) +} diff --git a/api-object-retention.go b/api-object-retention.go new file mode 100644 index 000000000..54f2762de --- /dev/null +++ b/api-object-retention.go @@ -0,0 +1,165 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// objectRetention - object retention specified in +// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html +type objectRetention struct { + XMLNS string `xml:"xmlns,attr,omitempty"` + XMLName xml.Name `xml:"Retention"` + Mode RetentionMode `xml:"Mode,omitempty"` + RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601" xml:"RetainUntilDate,omitempty"` +} + +func newObjectRetention(mode *RetentionMode, date *time.Time) (*objectRetention, error) { + objectRetention := &objectRetention{} + + if date != nil && !date.IsZero() { + objectRetention.RetainUntilDate = date + } + if mode != nil { + if !mode.IsValid() { + return nil, fmt.Errorf("invalid retention mode `%v`", mode) + } + objectRetention.Mode = *mode + } + + return objectRetention, nil +} + +// PutObjectRetentionOptions represents options specified by user for PutObject call +type PutObjectRetentionOptions struct { + GovernanceBypass bool + Mode *RetentionMode + RetainUntilDate *time.Time + VersionID string +} + +// PutObjectRetention sets object retention for a given object and versionID. +func (c Client) PutObjectRetention(ctx context.Context, bucketName, objectName string, opts PutObjectRetentionOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. 
+ urlValues := make(url.Values) + urlValues.Set("retention", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + retention, err := newObjectRetention(opts.Mode, opts.RetainUntilDate) + if err != nil { + return err + } + + retentionData, err := xml.Marshal(retention) + if err != nil { + return err + } + + // Build headers. + headers := make(http.Header) + + if opts.GovernanceBypass { + // Set the bypass governance retention header + headers.Set(amzBypassGovernance, "true") + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: bytes.NewReader(retentionData), + contentLength: int64(len(retentionData)), + contentMD5Base64: sumMD5Base64(retentionData), + contentSHA256Hex: sum256Hex(retentionData), + customHeader: headers, + } + + // Execute PUT Object Retention. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, objectName) + } + } + return nil +} + +// GetObjectRetention gets retention of given object. +func (c Client) GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, nil, err + } + + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, nil, err + } + urlValues := make(url.Values) + urlValues.Set("retention", "") + if versionID != "" { + urlValues.Set("versionId", versionID) + } + // Execute GET on bucket to list objects. 
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return nil, nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + retention := &objectRetention{} + if err = xml.NewDecoder(resp.Body).Decode(retention); err != nil { + return nil, nil, err + } + + return &retention.Mode, retention.RetainUntilDate, nil +} diff --git a/api-object-tagging.go b/api-object-tagging.go new file mode 100644 index 000000000..2709efcd1 --- /dev/null +++ b/api-object-tagging.go @@ -0,0 +1,157 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/tags" +) + +// PutObjectTaggingOptions holds an object version id +// to update tag(s) of a specific object version +type PutObjectTaggingOptions struct { + VersionID string +} + +// PutObjectTagging replaces or creates object tag(s) and can target +// a specific object version in a versioned bucket. 
+func (c Client) PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags, opts PutObjectTaggingOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + reqBytes, err := xml.Marshal(otags) + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: bytes.NewReader(reqBytes), + contentLength: int64(len(reqBytes)), + contentMD5Base64: sumMD5Base64(reqBytes), + } + + // Execute PUT to set a object tagging. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, objectName) + } + } + return nil +} + +// GetObjectTaggingOptions holds the object version ID +// to fetch the tagging key/value pairs +type GetObjectTaggingOptions struct { + VersionID string +} + +// GetObjectTagging fetches object tag(s) with options to target +// a specific object version in a versioned bucket. +func (c Client) GetObjectTagging(ctx context.Context, bucketName, objectName string, opts GetObjectTaggingOptions) (*tags.Tags, error) { + // Get resources properly escaped and lined up before + // using them in http request. 
+ urlValues := make(url.Values) + urlValues.Set("tagging", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + // Execute GET on object to get object tag(s) + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + return tags.ParseObjectXML(resp.Body) +} + +// RemoveObjectTaggingOptions holds the version id of the object to remove +type RemoveObjectTaggingOptions struct { + VersionID string +} + +// RemoveObjectTagging removes object tag(s) with options to control a specific object +// version in a versioned bucket +func (c Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName string, opts RemoveObjectTaggingOptions) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + // Execute DELETE on object to remove object tag(s) + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return err + } + + if resp != nil { + // S3 returns "204 No content" after Object tag deletion. + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, objectName) + } + } + return err +} diff --git a/api-presigned.go b/api-presigned.go index a2c060786..80c363da5 100644 --- a/api-presigned.go +++ b/api-presigned.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. 
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,21 +18,22 @@ package minio import ( + "context" "errors" "net/http" "net/url" "time" - "github.com/minio/minio-go/pkg/s3signer" - "github.com/minio/minio-go/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/signer" ) // presignURL - Returns a presigned URL for an input 'method'. // Expires maximum is 7days - ie. 604800 and minimum is 1. -func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { +func (c Client) presignURL(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { // Input validation. if method == "" { - return nil, ErrInvalidArgument("method cannot be empty.") + return nil, errInvalidArgument("method cannot be empty.") } if err = s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err @@ -54,7 +55,7 @@ func (c Client) presignURL(method string, bucketName string, objectName string, // Instantiate a new request. // Since expires is set newRequest will presign the request. var req *http.Request - if req, err = c.newRequest(method, reqMetadata); err != nil { + if req, err = c.newRequest(ctx, method, reqMetadata); err != nil { return nil, err } return req.URL, nil @@ -64,43 +65,43 @@ func (c Client) presignURL(method string, bucketName string, objectName string, // data without credentials. URL can have a maximum expiry of // upto 7days or a minimum of 1sec. Additionally you can override // a set of response headers using the query parameters. 
-func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { +func (c Client) PresignedGetObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { if err = s3utils.CheckValidObjectName(objectName); err != nil { return nil, err } - return c.presignURL("GET", bucketName, objectName, expires, reqParams) + return c.presignURL(ctx, http.MethodGet, bucketName, objectName, expires, reqParams) } -// PresignedHeadObject - Returns a presigned URL to access object -// metadata without credentials. URL can have a maximum expiry of -// upto 7days or a minimum of 1sec. Additionally you can override +// PresignedHeadObject - Returns a presigned URL to access +// object metadata without credentials. URL can have a maximum expiry +// of upto 7days or a minimum of 1sec. Additionally you can override // a set of response headers using the query parameters. -func (c Client) PresignedHeadObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { +func (c Client) PresignedHeadObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { if err = s3utils.CheckValidObjectName(objectName); err != nil { return nil, err } - return c.presignURL("HEAD", bucketName, objectName, expires, reqParams) + return c.presignURL(ctx, http.MethodHead, bucketName, objectName, expires, reqParams) } // PresignedPutObject - Returns a presigned URL to upload an object // without credentials. URL can have a maximum expiry of upto 7days // or a minimum of 1sec. 
-func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) { +func (c Client) PresignedPutObject(ctx context.Context, bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) { if err = s3utils.CheckValidObjectName(objectName); err != nil { return nil, err } - return c.presignURL("PUT", bucketName, objectName, expires, nil) + return c.presignURL(ctx, http.MethodPut, bucketName, objectName, expires, nil) } // Presign - returns a presigned URL for any http method of your choice // along with custom request params. URL can have a maximum expiry of // upto 7days or a minimum of 1sec. -func (c Client) Presign(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { - return c.presignURL(method, bucketName, objectName, expires, reqParams) +func (c Client) Presign(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { + return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams) } // PresignedPostPolicy - Returns POST urlString, form data to upload an object. -func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[string]string, err error) { +func (c Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url.URL, formData map[string]string, err error) { // Validate input arguments. if p.expiration.IsZero() { return nil, nil, errors.New("Expiration time must be specified") @@ -114,7 +115,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str bucketName := p.formData["bucket"] // Fetch the bucket location. 
- location, err := c.getBucketLocation(bucketName) + location, err := c.getBucketLocation(ctx, bucketName) if err != nil { return nil, nil, err } @@ -140,7 +141,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str ) if signerType.IsAnonymous() { - return nil, nil, ErrInvalidArgument("Presigned operations are not supported for anonymous credentials") + return nil, nil, errInvalidArgument("Presigned operations are not supported for anonymous credentials") } // Keep time. @@ -157,7 +158,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str p.formData["AWSAccessKeyId"] = accessKeyID } // Sign the policy. - p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, secretAccessKey) + p.formData["signature"] = signer.PostPresignSignatureV2(policyBase64, secretAccessKey) return u, p.formData, nil } @@ -180,7 +181,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str } // Add a credential policy. - credential := s3signer.GetCredential(accessKeyID, location, t) + credential := signer.GetCredential(accessKeyID, location, t, signer.ServiceTypeS3) if err = p.addNewPolicy(policyCondition{ matchType: "eq", condition: "$x-amz-credential", @@ -210,6 +211,6 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str if sessionToken != "" { p.formData["x-amz-security-token"] = sessionToken } - p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location) + p.formData["x-amz-signature"] = signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location) return u, p.formData, nil } diff --git a/api-put-bucket.go b/api-put-bucket.go index 33dc0cf3d..df9fe98af 100644 --- a/api-put-bucket.go +++ b/api-put-bucket.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. 
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,24 +21,28 @@ import ( "bytes" "context" "encoding/xml" - "io/ioutil" "net/http" - "net/url" - "strings" - "github.com/minio/minio-go/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/s3utils" ) /// Bucket operations +func (c Client) makeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { + // Validate the input arguments. + if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil { + return err + } -// MakeBucket creates a new bucket with bucketName. -// -// Location is an optional argument, by default all buckets are -// created in US Standard Region. -// -// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html -// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations -func (c Client) MakeBucket(bucketName string, location string) (err error) { + err = c.doMakeBucket(ctx, bucketName, opts.Region, opts.ObjectLocking) + if err != nil && (opts.Region == "" || opts.Region == "us-east-1") { + if resp, ok := err.(ErrorResponse); ok && resp.Code == "AuthorizationHeaderMalformed" && resp.Region != "" { + err = c.doMakeBucket(ctx, bucketName, resp.Region, opts.ObjectLocking) + } + } + return err +} + +func (c Client) doMakeBucket(ctx context.Context, bucketName string, location string, objectLockEnabled bool) (err error) { defer func() { // Save the location into cache on a successful makeBucket response. if err == nil { @@ -46,11 +50,6 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) { } }() - // Validate the input arguments. 
- if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil { - return err - } - // If location is empty, treat is a default region 'us-east-1'. if location == "" { location = "us-east-1" @@ -66,6 +65,12 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) { bucketLocation: location, } + if objectLockEnabled { + headers := make(http.Header) + headers.Add("x-amz-bucket-object-lock-enabled", "true") + reqMetadata.customHeader = headers + } + // If location is not 'us-east-1' create bucket location config. if location != "us-east-1" && location != "" { createBucketConfig := createBucketConfiguration{} @@ -82,7 +87,7 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) { } // Execute PUT to create a new bucket. - resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) defer closeResponse(resp) if err != nil { return err @@ -98,209 +103,21 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) { return nil } -// SetBucketPolicy set the access permissions on an existing bucket. -func (c Client) SetBucketPolicy(bucketName, policy string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // If policy is empty then delete the bucket policy. - if policy == "" { - return c.removeBucketPolicy(bucketName) - } - - // Save the updated policies. - return c.putBucketPolicy(bucketName, policy) -} - -// Saves a new bucket policy. -func (c Client) putBucketPolicy(bucketName, policy string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. 
- urlValues := make(url.Values) - urlValues.Set("policy", "") - - // Content-length is mandatory for put policy request - policyReader := strings.NewReader(policy) - b, err := ioutil.ReadAll(policyReader) - if err != nil { - return err - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: policyReader, - contentLength: int64(len(b)), - } - - // Execute PUT to upload a new bucket policy. - resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusNoContent { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// Removes all policies on a bucket. -func (c Client) removeBucketPolicy(bucketName string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("policy", "") - - // Execute DELETE on objectName. - resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return err - } - return nil +// MakeBucketOptions holds all options to tweak bucket creation +type MakeBucketOptions struct { + // Bucket location + Region string + // Enable object locking + ObjectLocking bool } -// SetBucketLifecycle set the lifecycle on an existing bucket. -func (c Client) SetBucketLifecycle(bucketName, lifecycle string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // If lifecycle is empty then delete it. - if lifecycle == "" { - return c.removeBucketLifecycle(bucketName) - } - - // Save the updated lifecycle. 
- return c.putBucketLifecycle(bucketName, lifecycle) -} - -// Saves a new bucket lifecycle. -func (c Client) putBucketLifecycle(bucketName, lifecycle string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("lifecycle", "") - - // Content-length is mandatory for put lifecycle request - lifecycleReader := strings.NewReader(lifecycle) - b, err := ioutil.ReadAll(lifecycleReader) - if err != nil { - return err - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: lifecycleReader, - contentLength: int64(len(b)), - contentMD5Base64: sumMD5Base64(b), - } - - // Execute PUT to upload a new bucket lifecycle. - resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// Remove lifecycle from a bucket. -func (c Client) removeBucketLifecycle(bucketName string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("lifecycle", "") - - // Execute DELETE on objectName. - resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return err - } - return nil -} - -// SetBucketNotification saves a new bucket notification. -func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error { - // Input validation. 
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("notification", "") - - notifBytes, err := xml.Marshal(bucketNotification) - if err != nil { - return err - } - - notifBuffer := bytes.NewReader(notifBytes) - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: notifBuffer, - contentLength: int64(len(notifBytes)), - contentMD5Base64: sumMD5Base64(notifBytes), - contentSHA256Hex: sum256Hex(notifBytes), - } - - // Execute PUT to upload a new bucket notification. - resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// RemoveAllBucketNotification - Remove bucket notification clears all previously specified config -func (c Client) RemoveAllBucketNotification(bucketName string) error { - return c.SetBucketNotification(bucketName, BucketNotification{}) +// MakeBucket creates a new bucket with bucketName with a context to control cancellations and timeouts. +// +// Location is an optional argument, by default all buckets are +// created in US Standard Region. 
+// +// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html +// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations +func (c Client) MakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { + return c.makeBucket(ctx, bucketName, opts) } diff --git a/api-put-object-common.go b/api-put-object-common.go index c16c3c69a..3d0408e53 100644 --- a/api-put-object-common.go +++ b/api-put-object-common.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -23,7 +23,7 @@ import ( "math" "os" - "github.com/minio/minio-go/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/s3utils" ) // Verify if reader is *minio.Object @@ -34,26 +34,25 @@ func isObject(reader io.Reader) (ok bool) { // Verify if reader is a generic ReaderAt func isReadAt(reader io.Reader) (ok bool) { - _, ok = reader.(io.ReaderAt) + var v *os.File + v, ok = reader.(*os.File) if ok { - var v *os.File - v, ok = reader.(*os.File) - if ok { - // Stdin, Stdout and Stderr all have *os.File type - // which happen to also be io.ReaderAt compatible - // we need to add special conditions for them to - // be ignored by this function. - for _, f := range []string{ - "/dev/stdin", - "/dev/stdout", - "/dev/stderr", - } { - if f == v.Name() { - ok = false - break - } + // Stdin, Stdout and Stderr all have *os.File type + // which happen to also be io.ReaderAt compatible + // we need to add special conditions for them to + // be ignored by this function. 
+ for _, f := range []string{ + "/dev/stdin", + "/dev/stdout", + "/dev/stderr", + } { + if f == v.Name() { + ok = false + break } } + } else { + _, ok = reader.(io.ReaderAt) } return } @@ -65,23 +64,61 @@ func isReadAt(reader io.Reader) (ok bool) { // object storage it will have the following parameters as constants. // // maxPartsCount - 10000 -// minPartSize - 64MiB +// minPartSize - 128MiB // maxMultipartPutObjectSize - 5TiB // -func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) { +func optimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) { // object size is '-1' set it to 5TiB. + var unknownSize bool if objectSize == -1 { + unknownSize = true objectSize = maxMultipartPutObjectSize } + // object size is larger than supported maximum. if objectSize > maxMultipartPutObjectSize { - err = ErrEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "") + err = errEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "") return } - // Use floats for part size for all calculations to avoid - // overflows during float64 to int64 conversions. 
- partSizeFlt := math.Ceil(float64(objectSize / maxPartsCount)) - partSizeFlt = math.Ceil(partSizeFlt/minPartSize) * minPartSize + + var partSizeFlt float64 + if configuredPartSize > 0 { + if int64(configuredPartSize) > objectSize { + err = errEntityTooLarge(int64(configuredPartSize), objectSize, "", "") + return + } + + if !unknownSize { + if objectSize > (int64(configuredPartSize) * maxPartsCount) { + err = errInvalidArgument("Part size * max_parts(10000) is lesser than input objectSize.") + return + } + } + + if configuredPartSize < absMinPartSize { + err = errInvalidArgument("Input part size is smaller than allowed minimum of 5MiB.") + return + } + + if configuredPartSize > maxPartSize { + err = errInvalidArgument("Input part size is bigger than allowed maximum of 5GiB.") + return + } + + partSizeFlt = float64(configuredPartSize) + if unknownSize { + // If input has unknown size and part size is configured + // keep it to maximum allowed as per 10000 parts. + objectSize = int64(configuredPartSize) * maxPartsCount + } + } else { + configuredPartSize = minPartSize + // Use floats for part size for all calculations to avoid + // overflows during float64 to int64 conversions. + partSizeFlt = float64(objectSize / maxPartsCount) + partSizeFlt = math.Ceil(partSizeFlt/float64(configuredPartSize)) * float64(configuredPartSize) + } + // Total parts count. totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt)) // Part size. diff --git a/api-put-object-context.go b/api-put-object-context.go deleted file mode 100644 index ff4663e2f..000000000 --- a/api-put-object-context.go +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io" -) - -// PutObjectWithContext - Identical to PutObject call, but accepts context to facilitate request cancellation. -func (c Client) PutObjectWithContext(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, - opts PutObjectOptions) (n int64, err error) { - err = opts.validate() - if err != nil { - return 0, err - } - return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts) -} diff --git a/api-put-object-copy.go b/api-put-object-copy.go index 21322ef6a..9af036ec0 100644 --- a/api-put-object-copy.go +++ b/api-put-object-copy.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017, 2018 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017, 2018 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,62 +22,56 @@ import ( "io" "io/ioutil" "net/http" - - "github.com/minio/minio-go/pkg/encrypt" ) // CopyObject - copy a source object into a new object -func (c Client) CopyObject(dst DestinationInfo, src SourceInfo) error { - return c.CopyObjectWithProgress(dst, src, nil) -} - -// CopyObjectWithProgress - copy a source object into a new object, optionally takes -// progress bar input to notify current progress. 
-func (c Client) CopyObjectWithProgress(dst DestinationInfo, src SourceInfo, progress io.Reader) error { - header := make(http.Header) - for k, v := range src.Headers { - header[k] = v - } - - var err error - var size int64 - // If progress bar is specified, size should be requested as well initiate a StatObject request. - if progress != nil { - size, _, _, err = src.getProps(c) - if err != nil { - return err - } +func (c Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySrcOptions) (UploadInfo, error) { + if err := src.validate(); err != nil { + return UploadInfo{}, err } - if src.encryption != nil { - encrypt.SSECopy(src.encryption).Marshal(header) + if err := dst.validate(); err != nil { + return UploadInfo{}, err } - if dst.encryption != nil { - dst.encryption.Marshal(header) - } - for k, v := range dst.getUserMetaHeadersMap(true) { - header.Set(k, v) - } + header := make(http.Header) + dst.Marshal(header) + src.Marshal(header) - resp, err := c.executeMethod(context.Background(), "PUT", requestMetadata{ - bucketName: dst.bucket, - objectName: dst.object, + resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ + bucketName: dst.Bucket, + objectName: dst.Object, customHeader: header, }) if err != nil { - return err + return UploadInfo{}, err } defer closeResponse(resp) if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, dst.bucket, dst.object) + return UploadInfo{}, httpRespToErrorResponse(resp, dst.Bucket, dst.Object) } // Update the progress properly after successful copy. 
- if progress != nil { - io.CopyN(ioutil.Discard, progress, size) + if dst.Progress != nil { + io.Copy(ioutil.Discard, io.LimitReader(dst.Progress, dst.Size)) } - return nil + cpObjRes := copyObjectResult{} + if err = xmlDecoder(resp.Body, &cpObjRes); err != nil { + return UploadInfo{}, err + } + + // extract lifecycle expiry date and rule ID + expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) + + return UploadInfo{ + Bucket: dst.Bucket, + Key: dst.Object, + LastModified: cpObjRes.LastModified, + ETag: trimEtag(resp.Header.Get("ETag")), + VersionID: resp.Header.Get(amzVersionID), + Expiration: expTime, + ExpirationRuleID: ruleID, + }, nil } diff --git a/api-put-object-file-context.go b/api-put-object-file-context.go index 140a9c069..6c0f20df3 100644 --- a/api-put-object-file-context.go +++ b/api-put-object-file-context.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -23,31 +23,31 @@ import ( "os" "path/filepath" - "github.com/minio/minio-go/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/s3utils" ) -// FPutObjectWithContext - Create an object in a bucket, with contents from file at filePath. Allows request cancellation. -func (c Client) FPutObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) { +// FPutObject - Create an object in a bucket, with contents from file at filePath. Allows request cancellation. +func (c Client) FPutObject(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. 
if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err + return UploadInfo{}, err } if err := s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err + return UploadInfo{}, err } // Open the referenced file. fileReader, err := os.Open(filePath) // If any error fail quickly here. if err != nil { - return 0, err + return UploadInfo{}, err } defer fileReader.Close() // Save the file stat. fileStat, err := fileReader.Stat() if err != nil { - return 0, err + return UploadInfo{}, err } // Save the file size. @@ -60,5 +60,5 @@ func (c Client) FPutObjectWithContext(ctx context.Context, bucketName, objectNam opts.ContentType = "application/octet-stream" } } - return c.PutObjectWithContext(ctx, bucketName, objectName, fileReader, fileSize, opts) + return c.PutObject(ctx, bucketName, objectName, fileReader, fileSize, opts) } diff --git a/api-put-object-file.go b/api-put-object-file.go deleted file mode 100644 index 7c8e05117..000000000 --- a/api-put-object-file.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio - -import ( - "context" -) - -// FPutObject - Create an object in a bucket, with contents from file at filePath -func (c Client) FPutObject(bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) { - return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, opts) -} diff --git a/api-put-object-multipart.go b/api-put-object-multipart.go index db92520e8..1c862ad96 100644 --- a/api-put-object-multipart.go +++ b/api-put-object-multipart.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -28,18 +28,17 @@ import ( "io/ioutil" "net/http" "net/url" - "runtime/debug" "sort" "strconv" "strings" - "github.com/minio/minio-go/pkg/encrypt" - "github.com/minio/minio-go/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/s3utils" ) func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, - opts PutObjectOptions) (n int64, err error) { - n, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) + opts PutObjectOptions) (info UploadInfo, err error) { + info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) if err != nil { errResp := ToErrorResponse(err) // Verify if multipart functionality is not available, if not @@ -47,22 +46,22 @@ func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName s if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { // Verify if size of reader is greater than '5GiB'. 
if size > maxSinglePutObjectSize { - return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) } // Fall back to uploading as single PutObject operation. - return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) + return c.putObject(ctx, bucketName, objectName, reader, size, opts) } } - return n, err + return info, err } -func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) { +func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err + return UploadInfo{}, err } if err = s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err + return UploadInfo{}, err } // Total data read and written to server. should be equal to @@ -73,15 +72,15 @@ func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obje var complMultipartUpload completeMultipartUpload // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, _, err := optimalPartInfo(-1) + totalPartsCount, partSize, _, err := optimalPartInfo(-1, opts.PartSize) if err != nil { - return 0, err + return UploadInfo{}, err } // Initiate a new multipart upload. uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { - return 0, err + return UploadInfo{}, err } defer func() { @@ -98,26 +97,27 @@ func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obje // Create a buffer. 
buf := make([]byte, partSize) - defer debug.FreeOSMemory() for partNumber <= totalPartsCount { // Choose hash algorithms to be calculated by hashCopyN, // avoid sha256 with non-v4 signature request or // HTTPS connection. - hashAlgos, hashSums := c.hashMaterials() + hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5) - length, rErr := io.ReadFull(reader, buf) - if rErr == io.EOF { + length, rErr := readFull(reader, buf) + if rErr == io.EOF && partNumber > 1 { break } - if rErr != nil && rErr != io.ErrUnexpectedEOF { - return 0, rErr + + if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { + return UploadInfo{}, rErr } // Calculates hash sums while copying partSize bytes into cw. for k, v := range hashAlgos { v.Write(buf[:length]) hashSums[k] = v.Sum(nil) + v.Close() } // Update progress reader appropriately to the latest offset @@ -137,11 +137,10 @@ func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obje } // Proceed to upload the part. - var objPart ObjectPart - objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, + objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption) - if err != nil { - return totalUploadedSize, err + if uerr != nil { + return UploadInfo{}, uerr } // Save successfully uploaded part metadata. @@ -165,7 +164,7 @@ func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obje for i := 1; i < partNumber; i++ { part, ok := partsInfo[i] if !ok { - return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) } complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ ETag: part.ETag, @@ -175,12 +174,14 @@ func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obje // Sort all completed parts. 
sort.Sort(completedParts(complMultipartUpload.Parts)) - if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil { - return totalUploadedSize, err + + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) + if err != nil { + return UploadInfo{}, err } - // Return final size. - return totalUploadedSize, nil + uploadInfo.Size = totalUploadedSize + return uploadInfo, nil } // initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. @@ -208,7 +209,7 @@ func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectN } // Execute POST on an objectName to initiate multipart upload. - resp, err := c.executeMethod(ctx, "POST", reqMetadata) + resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) defer closeResponse(resp) if err != nil { return initiateMultipartUploadResult{}, err @@ -238,16 +239,16 @@ func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID return ObjectPart{}, err } if size > maxPartSize { - return ObjectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName) + return ObjectPart{}, errEntityTooLarge(size, maxPartSize, bucketName, objectName) } if size <= -1 { - return ObjectPart{}, ErrEntityTooSmall(size, bucketName, objectName) + return ObjectPart{}, errEntityTooSmall(size, bucketName, objectName) } if partNumber <= 0 { - return ObjectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.") + return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.") } if uploadID == "" { - return ObjectPart{}, ErrInvalidArgument("UploadID cannot be empty.") + return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.") } // Get resources properly escaped and lined up before using them in http request. 
@@ -279,7 +280,7 @@ func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID } // Execute PUT on each part. - resp, err := c.executeMethod(ctx, "PUT", reqMetadata) + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) defer closeResponse(resp) if err != nil { return ObjectPart{}, err @@ -294,20 +295,19 @@ func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID objPart.Size = size objPart.PartNumber = partNumber // Trim off the odd double quotes from ETag in the beginning and end. - objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") - objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"") + objPart.ETag = trimEtag(resp.Header.Get("ETag")) return objPart, nil } // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, - complete completeMultipartUpload) (completeMultipartUploadResult, error) { + complete completeMultipartUpload) (UploadInfo, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return completeMultipartUploadResult{}, err + return UploadInfo{}, err } if err := s3utils.CheckValidObjectName(objectName); err != nil { - return completeMultipartUploadResult{}, err + return UploadInfo{}, err } // Initialize url queries. @@ -316,7 +316,7 @@ func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectN // Marshal complete multipart body. completeMultipartUploadBytes, err := xml.Marshal(complete) if err != nil { - return completeMultipartUploadResult{}, err + return UploadInfo{}, err } // Instantiate all the complete multipart buffer. @@ -331,14 +331,14 @@ func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectN } // Execute POST to complete multipart upload for an objectName. 
- resp, err := c.executeMethod(ctx, "POST", reqMetadata) + resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) defer closeResponse(resp) if err != nil { - return completeMultipartUploadResult{}, err + return UploadInfo{}, err } if resp != nil { if resp.StatusCode != http.StatusOK { - return completeMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) + return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) } } @@ -346,14 +346,14 @@ func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectN var b []byte b, err = ioutil.ReadAll(resp.Body) if err != nil { - return completeMultipartUploadResult{}, err + return UploadInfo{}, err } // Decode completed multipart upload response on success. completeMultipartUploadResult := completeMultipartUploadResult{} err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult) if err != nil { // xml parsing failure due to presence an ill-formed xml fragment - return completeMultipartUploadResult, err + return UploadInfo{}, err } else if completeMultipartUploadResult.Bucket == "" { // xml's Decode method ignores well-formed xml that don't apply to the type of value supplied. 
// In this case, it would leave completeMultipartUploadResult with the corresponding zero-values @@ -364,9 +364,22 @@ func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectN err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr) if err != nil { // xml parsing failure due to presence an ill-formed xml fragment - return completeMultipartUploadResult, err + return UploadInfo{}, err } - return completeMultipartUploadResult, completeMultipartUploadErr + return UploadInfo{}, completeMultipartUploadErr } - return completeMultipartUploadResult, nil + + // extract lifecycle expiry date and rule ID + expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) + + return UploadInfo{ + Bucket: completeMultipartUploadResult.Bucket, + Key: completeMultipartUploadResult.Key, + ETag: trimEtag(completeMultipartUploadResult.ETag), + VersionID: resp.Header.Get(amzVersionID), + Location: completeMultipartUploadResult.Location, + Expiration: expTime, + ExpirationRuleID: ruleID, + }, nil + } diff --git a/api-put-object-streaming.go b/api-put-object-streaming.go index 211d1c23c..251fee9f8 100644 --- a/api-put-object-streaming.go +++ b/api-put-object-streaming.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -18,14 +18,18 @@ package minio import ( + "bytes" "context" + "encoding/base64" "fmt" "io" "net/http" + "net/url" "sort" "strings" - "github.com/minio/minio-go/pkg/s3utils" + "github.com/google/uuid" + "github.com/minio/minio-go/v7/pkg/s3utils" ) // putObjectMultipartStream - upload a large object using @@ -38,13 +42,13 @@ import ( // - Any reader which has a method 'ReadAt()' // func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string, - reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { + reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { - if !isObject(reader) && isReadAt(reader) { + if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 { // Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader. - n, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts) + info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts) } else { - n, err = c.putObjectMultipartStreamNoChecksum(ctx, bucketName, objectName, reader, size, opts) + info, err = c.putObjectMultipartStreamOptionalChecksum(ctx, bucketName, objectName, reader, size, opts) } if err != nil { errResp := ToErrorResponse(err) @@ -53,13 +57,13 @@ func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, object if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { // Verify if size of reader is greater than '5GiB'. if size > maxSinglePutObjectSize { - return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) } // Fall back to uploading as single PutObject operation. 
- return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) + return c.putObject(ctx, bucketName, objectName, reader, size, opts) } } - return n, err + return info, err } // uploadedPartRes - the response received from a part upload. @@ -67,15 +71,15 @@ type uploadedPartRes struct { Error error // Any error encountered while uploading the part. PartNum int // Number of the part uploaded. Size int64 // Size of the part uploaded. - Part *ObjectPart + Part ObjectPart } type uploadPartReq struct { - PartNum int // Number of the part uploaded. - Part *ObjectPart // Size of the part uploaded. + PartNum int // Number of the part uploaded. + Part ObjectPart // Size of the part uploaded. } -// putObjectMultipartFromReadAt - Uploads files bigger than 64MiB. +// putObjectMultipartFromReadAt - Uploads files bigger than 128MiB. // Supports all readers which implements io.ReaderAt interface // (ReadAt method). // @@ -87,25 +91,25 @@ type uploadPartReq struct { // cleaned automatically when the caller i.e http client closes the // stream after uploading all the contents successfully. func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string, - reader io.ReaderAt, size int64, opts PutObjectOptions) (n int64, err error) { + reader io.ReaderAt, size int64, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err + return UploadInfo{}, err } if err = s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err + return UploadInfo{}, err } // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size) + totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size, opts.PartSize) if err != nil { - return 0, err + return UploadInfo{}, err } // Initiate a new multipart upload. 
uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { - return 0, err + return UploadInfo{}, err } // Aborts the multipart upload in progress, if the @@ -139,12 +143,18 @@ func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketNa // Send each part number to the channel to be processed. for p := 1; p <= totalPartsCount; p++ { - uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil} + uploadPartsCh <- uploadPartReq{PartNum: p} } close(uploadPartsCh) + + var partsBuf = make([][]byte, opts.getNumThreads()) + for i := range partsBuf { + partsBuf[i] = make([]byte, partSize) + } + // Receive each part number from the channel allowing three parallel uploads. for w := 1; w <= opts.getNumThreads(); w++ { - go func(partSize int64) { + go func(w int, partSize int64) { // Each worker will draw from the part channel and upload in parallel. for uploadReq := range uploadPartsCh { @@ -160,17 +170,24 @@ func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketNa partSize = lastPartSize } + n, rerr := readFull(io.NewSectionReader(reader, readOffset, partSize), partsBuf[w-1][:partSize]) + if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF { + uploadedPartsCh <- uploadedPartRes{ + Error: rerr, + } + // Exit the goroutine. + return + } + // Get a section reader on a particular offset. - sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress) + hookReader := newHook(bytes.NewReader(partsBuf[w-1][:n]), opts.Progress) // Proceed to upload the part. - var objPart ObjectPart - objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, - sectionReader, uploadReq.PartNum, + objPart, err := c.uploadPart(ctx, bucketName, objectName, + uploadID, hookReader, uploadReq.PartNum, "", "", partSize, opts.ServerSideEncryption) if err != nil { uploadedPartsCh <- uploadedPartRes{ - Size: 0, Error: err, } // Exit the goroutine. 
@@ -178,17 +195,16 @@ func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketNa } // Save successfully uploaded part metadata. - uploadReq.Part = &objPart + uploadReq.Part = objPart // Send successful part info through the channel. uploadedPartsCh <- uploadedPartRes{ Size: objPart.Size, PartNum: uploadReq.PartNum, Part: uploadReq.Part, - Error: nil, } } - }(partSize) + }(w, partSize) } // Gather the responses as they occur and update any @@ -196,58 +212,53 @@ func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketNa for u := 1; u <= totalPartsCount; u++ { uploadRes := <-uploadedPartsCh if uploadRes.Error != nil { - return totalUploadedSize, uploadRes.Error - } - // Retrieve each uploaded part and store it to be completed. - // part, ok := partsInfo[uploadRes.PartNum] - part := uploadRes.Part - if part == nil { - return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum)) + return UploadInfo{}, uploadRes.Error } // Update the totalUploadedSize. totalUploadedSize += uploadRes.Size // Store the parts to be completed in order. complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, + ETag: uploadRes.Part.ETag, + PartNumber: uploadRes.Part.PartNumber, }) } // Verify if we uploaded all the data. if totalUploadedSize != size { - return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) } // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) + + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) if err != nil { - return totalUploadedSize, err + return UploadInfo{}, err } - // Return final size. 
- return totalUploadedSize, nil + uploadInfo.Size = totalUploadedSize + return uploadInfo, nil } -func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketName, objectName string, - reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { +func (c Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string, + reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err + return UploadInfo{}, err } if err = s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err + return UploadInfo{}, err } // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size) + totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size, opts.PartSize) if err != nil { - return 0, err + return UploadInfo{}, err } // Initiates a new multipart request uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { - return 0, err + return UploadInfo{}, err } // Aborts the multipart upload if the function returns @@ -266,23 +277,52 @@ func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketNa // Initialize parts uploaded map. partsInfo := make(map[int]ObjectPart) + // Create a buffer. + buf := make([]byte, partSize) + + // Avoid declaring variables in the for loop + var md5Base64 string + var hookReader io.Reader + // Part number always starts with '1'. var partNumber int for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { - // Update progress reader appropriately to the latest offset - // as we read from the source. - hookReader := newHook(reader, opts.Progress) // Proceed to upload the part. 
if partNumber == totalPartsCount { partSize = lastPartSize } - var objPart ObjectPart - objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, + + if opts.SendContentMd5 { + length, rerr := readFull(reader, buf) + if rerr == io.EOF && partNumber > 1 { + break + } + + if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF { + return UploadInfo{}, rerr + } + + // Calculate md5sum. + hash := c.md5Hasher() + hash.Write(buf[:length]) + md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) + hash.Close() + + // Update progress reader appropriately to the latest offset + // as we read from the source. + hookReader = newHook(bytes.NewReader(buf[:length]), opts.Progress) + } else { + // Update progress reader appropriately to the latest offset + // as we read from the source. + hookReader = newHook(reader, opts.Progress) + } + + objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, io.LimitReader(hookReader, partSize), - partNumber, "", "", partSize, opts.ServerSideEncryption) - if err != nil { - return totalUploadedSize, err + partNumber, md5Base64, "", partSize, opts.ServerSideEncryption) + if uerr != nil { + return UploadInfo{}, uerr } // Save successfully uploaded part metadata. @@ -295,7 +335,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketNa // Verify if we uploaded all the data. 
if size > 0 { if totalUploadedSize != size { - return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) } } @@ -307,7 +347,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketNa for i := 1; i < partNumber; i++ { part, ok := partsInfo[i] if !ok { - return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) } complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ ETag: part.ETag, @@ -317,67 +357,86 @@ func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketNa // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) + + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) if err != nil { - return totalUploadedSize, err + return UploadInfo{}, err } - // Return final size. - return totalUploadedSize, nil + uploadInfo.Size = totalUploadedSize + return uploadInfo, nil } -// putObjectNoChecksum special function used Google Cloud Storage. This special function +// putObject special function used Google Cloud Storage. This special function // is used for Google Cloud Storage since Google's multipart API is not S3 compatible. -func (c Client) putObjectNoChecksum(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { +func (c Client) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. 
if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err + return UploadInfo{}, err } if err := s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err + return UploadInfo{}, err } // Size -1 is only supported on Google Cloud Storage, we error // out in all other situations. if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) { - return 0, ErrEntityTooSmall(size, bucketName, objectName) + return UploadInfo{}, errEntityTooSmall(size, bucketName, objectName) + } + + if opts.SendContentMd5 && s3utils.IsGoogleEndpoint(*c.endpointURL) && size < 0 { + return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'") } + if size > 0 { if isReadAt(reader) && !isObject(reader) { - seeker, _ := reader.(io.Seeker) - offset, err := seeker.Seek(0, io.SeekCurrent) - if err != nil { - return 0, ErrInvalidArgument(err.Error()) + seeker, ok := reader.(io.Seeker) + if ok { + offset, err := seeker.Seek(0, io.SeekCurrent) + if err != nil { + return UploadInfo{}, errInvalidArgument(err.Error()) + } + reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size) } - reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size) } } + var md5Base64 string + if opts.SendContentMd5 { + // Create a buffer. + buf := make([]byte, size) + + length, rErr := readFull(reader, buf) + if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { + return UploadInfo{}, rErr + } + + // Calculate md5sum. + hash := c.md5Hasher() + hash.Write(buf[:length]) + md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) + reader = bytes.NewReader(buf[:length]) + hash.Close() + } + // Update progress reader appropriately to the latest offset as we // read from the source. readSeeker := newHook(reader, opts.Progress) // This function does not calculate sha256 and md5sum for payload. // Execute put object. 
- st, err := c.putObjectDo(ctx, bucketName, objectName, readSeeker, "", "", size, opts) - if err != nil { - return 0, err - } - if st.Size != size { - return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) - } - return size, nil + return c.putObjectDo(ctx, bucketName, objectName, readSeeker, md5Base64, "", size, opts) } // putObjectDo - executes the put object http operation. // NOTE: You must have WRITE permissions on a bucket to add an object to it. -func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (ObjectInfo, error) { +func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (UploadInfo, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ObjectInfo{}, err + return UploadInfo{}, err } if err := s3utils.CheckValidObjectName(objectName); err != nil { - return ObjectInfo{}, err + return UploadInfo{}, err } // Set headers. customHeader := opts.Header() @@ -392,26 +451,36 @@ func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, contentMD5Base64: md5Base64, contentSHA256Hex: sha256Hex, } - + if opts.ReplicationVersionID != "" { + if _, err := uuid.Parse(opts.ReplicationVersionID); err != nil { + return UploadInfo{}, errInvalidArgument(err.Error()) + } + urlValues := make(url.Values) + urlValues.Set("versionId", opts.ReplicationVersionID) + reqMetadata.queryValues = urlValues + } // Execute PUT an objectName. 
- resp, err := c.executeMethod(ctx, "PUT", reqMetadata) + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) defer closeResponse(resp) if err != nil { - return ObjectInfo{}, err + return UploadInfo{}, err } if resp != nil { if resp.StatusCode != http.StatusOK { - return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) + return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) } } - var objInfo ObjectInfo - // Trim off the odd double quotes from ETag in the beginning and end. - objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") - objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"") - // A success here means data was written to server successfully. - objInfo.Size = size - - // Return here. - return objInfo, nil + // extract lifecycle expiry date and rule ID + expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) + + return UploadInfo{ + Bucket: bucketName, + Key: objectName, + ETag: trimEtag(resp.Header.Get("ETag")), + VersionID: resp.Header.Get(amzVersionID), + Size: size, + Expiration: expTime, + ExpirationRuleID: ruleID, + }, nil } diff --git a/api-put-object.go b/api-put-object.go index 0330cd99d..b0d5af466 100644 --- a/api-put-object.go +++ b/api-put-object.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -20,30 +20,61 @@ package minio import ( "bytes" "context" + "encoding/base64" + "errors" "fmt" "io" "net/http" - "runtime/debug" "sort" + "time" - "github.com/minio/minio-go/pkg/encrypt" - "github.com/minio/minio-go/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/s3utils" "golang.org/x/net/http/httpguts" ) +// ReplicationStatus represents replication status of object +type ReplicationStatus string + +const ( + // ReplicationStatusPending indicates replication is pending + ReplicationStatusPending ReplicationStatus = "PENDING" + // ReplicationStatusComplete indicates replication completed ok + ReplicationStatusComplete ReplicationStatus = "COMPLETE" + // ReplicationStatusFailed indicates replication failed + ReplicationStatusFailed ReplicationStatus = "FAILED" + // ReplicationStatusReplica indicates object is a replica of a source + ReplicationStatusReplica ReplicationStatus = "REPLICA" +) + +// Empty returns true if no replication status set. 
+func (r ReplicationStatus) Empty() bool { + return r == "" +} + // PutObjectOptions represents options specified by user for PutObject call type PutObjectOptions struct { UserMetadata map[string]string + UserTags map[string]string Progress io.Reader ContentType string ContentEncoding string ContentDisposition string ContentLanguage string CacheControl string + Mode RetentionMode + RetainUntilDate time.Time ServerSideEncryption encrypt.ServerSide NumThreads uint StorageClass string WebsiteRedirectLocation string + PartSize uint64 + LegalHold LegalHoldStatus + SendContentMd5 bool + DisableMultipart bool + ReplicationVersionID string + ReplicationStatus ReplicationStatus + ReplicationMTime time.Time } // getNumThreads - gets the number of threads to be used in the multipart @@ -62,37 +93,64 @@ func (opts PutObjectOptions) getNumThreads() (numThreads int) { func (opts PutObjectOptions) Header() (header http.Header) { header = make(http.Header) - if opts.ContentType != "" { - header["Content-Type"] = []string{opts.ContentType} - } else { - header["Content-Type"] = []string{"application/octet-stream"} + contentType := opts.ContentType + if contentType == "" { + contentType = "application/octet-stream" } + header.Set("Content-Type", contentType) + if opts.ContentEncoding != "" { - header["Content-Encoding"] = []string{opts.ContentEncoding} + header.Set("Content-Encoding", opts.ContentEncoding) } if opts.ContentDisposition != "" { - header["Content-Disposition"] = []string{opts.ContentDisposition} + header.Set("Content-Disposition", opts.ContentDisposition) } if opts.ContentLanguage != "" { - header["Content-Language"] = []string{opts.ContentLanguage} + header.Set("Content-Language", opts.ContentLanguage) } if opts.CacheControl != "" { - header["Cache-Control"] = []string{opts.CacheControl} + header.Set("Cache-Control", opts.CacheControl) + } + + if opts.Mode != "" { + header.Set(amzLockMode, opts.Mode.String()) + } + + if !opts.RetainUntilDate.IsZero() { + 
header.Set("X-Amz-Object-Lock-Retain-Until-Date", opts.RetainUntilDate.Format(time.RFC3339)) } + + if opts.LegalHold != "" { + header.Set(amzLegalHoldHeader, opts.LegalHold.String()) + } + if opts.ServerSideEncryption != nil { opts.ServerSideEncryption.Marshal(header) } + if opts.StorageClass != "" { - header[amzStorageClass] = []string{opts.StorageClass} + header.Set(amzStorageClass, opts.StorageClass) } + if opts.WebsiteRedirectLocation != "" { - header[amzWebsiteRedirectLocation] = []string{opts.WebsiteRedirectLocation} + header.Set(amzWebsiteRedirectLocation, opts.WebsiteRedirectLocation) + } + + if !opts.ReplicationStatus.Empty() { + header.Set(amzBucketReplicationStatus, string(opts.ReplicationStatus)) + } + if !opts.ReplicationMTime.IsZero() { + header.Set(minIOBucketReplicationSourceMTime, opts.ReplicationMTime.Format(time.RFC3339)) + } + if len(opts.UserTags) != 0 { + header.Set(amzTaggingHeader, s3utils.TagEncode(opts.UserTags)) } + for k, v := range opts.UserMetadata { - if !isAmzHeader(k) && !isStandardHeader(k) && !isStorageClassHeader(k) { - header["X-Amz-Meta-"+k] = []string{v} + if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) { + header.Set(k, v) } else { - header[k] = []string{v} + header.Set("x-amz-meta-"+k, v) } } return @@ -102,12 +160,18 @@ func (opts PutObjectOptions) Header() (header http.Header) { func (opts PutObjectOptions) validate() (err error) { for k, v := range opts.UserMetadata { if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) { - return ErrInvalidArgument(k + " unsupported user defined metadata name") + return errInvalidArgument(k + " unsupported user defined metadata name") } if !httpguts.ValidHeaderFieldValue(v) { - return ErrInvalidArgument(v + " unsupported user defined metadata value") + return errInvalidArgument(v + " unsupported user defined metadata value") } } + if opts.Mode != "" && !opts.Mode.IsValid() { + return 
errInvalidArgument(opts.Mode.String() + " unsupported retention mode") + } + if opts.LegalHold != "" && !opts.LegalHold.IsValid() { + return errInvalidArgument(opts.LegalHold.String() + " unsupported legal-hold status") + } return nil } @@ -123,54 +187,68 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part // // You must have WRITE permissions on a bucket to create an object. // -// - For size smaller than 64MiB PutObject automatically does a +// - For size smaller than 128MiB PutObject automatically does a // single atomic Put operation. -// - For size larger than 64MiB PutObject automatically does a +// - For size larger than 128MiB PutObject automatically does a // multipart Put operation. // - For size input as -1 PutObject does a multipart Put operation // until input stream reaches EOF. Maximum object size that can // be uploaded through this operation will be 5TiB. -func (c Client) PutObject(bucketName, objectName string, reader io.Reader, objectSize int64, - opts PutObjectOptions) (n int64, err error) { - return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, objectSize, opts) +func (c Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, + opts PutObjectOptions) (info UploadInfo, err error) { + if objectSize < 0 && opts.DisableMultipart { + return UploadInfo{}, errors.New("object size must be provided with disable multipart upload") + } + + err = opts.validate() + if err != nil { + return UploadInfo{}, err + } + + return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts) } -func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { +func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { // Check for largest object 
size allowed. if size > int64(maxMultipartPutObjectSize) { - return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) + return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) } // NOTE: Streaming signature is not supported by GCS. if s3utils.IsGoogleEndpoint(*c.endpointURL) { - // Do not compute MD5 for Google Cloud Storage. - return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) + return c.putObject(ctx, bucketName, objectName, reader, size, opts) + } + + partSize := opts.PartSize + if opts.PartSize == 0 { + partSize = minPartSize } if c.overrideSignerType.IsV2() { - if size >= 0 && size < minPartSize { - return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) + if size >= 0 && size < int64(partSize) || opts.DisableMultipart { + return c.putObject(ctx, bucketName, objectName, reader, size, opts) } return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts) } + if size < 0 { return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) } - if size < minPartSize { - return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) + if size < int64(partSize) || opts.DisableMultipart { + return c.putObject(ctx, bucketName, objectName, reader, size, opts) } - // For all sizes greater than 64MiB do multipart. + return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts) } -func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) { +func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. 
if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err + return UploadInfo{}, err } if err = s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err + return UploadInfo{}, err } // Total data read and written to server. should be equal to @@ -181,14 +259,14 @@ func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName var complMultipartUpload completeMultipartUpload // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, _, err := optimalPartInfo(-1) + totalPartsCount, partSize, _, err := optimalPartInfo(-1, opts.PartSize) if err != nil { - return 0, err + return UploadInfo{}, err } // Initiate a new multipart upload. uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { - return 0, err + return UploadInfo{}, err } defer func() { @@ -205,26 +283,35 @@ func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName // Create a buffer. buf := make([]byte, partSize) - defer debug.FreeOSMemory() for partNumber <= totalPartsCount { - length, rErr := io.ReadFull(reader, buf) - if rErr == io.EOF && partNumber > 1 { + length, rerr := readFull(reader, buf) + if rerr == io.EOF && partNumber > 1 { break } - if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { - return 0, rErr + + if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF { + return UploadInfo{}, rerr + } + + var md5Base64 string + if opts.SendContentMd5 { + // Calculate md5sum. + hash := c.md5Hasher() + hash.Write(buf[:length]) + md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) + hash.Close() } + // Update progress reader appropriately to the latest offset // as we read from the source. rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) // Proceed to upload the part. 
- var objPart ObjectPart - objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, - "", "", int64(length), opts.ServerSideEncryption) - if err != nil { - return totalUploadedSize, err + objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, + md5Base64, "", int64(length), opts.ServerSideEncryption) + if uerr != nil { + return UploadInfo{}, uerr } // Save successfully uploaded part metadata. @@ -238,7 +325,7 @@ func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName // For unknown size, Read EOF we break away. // We do not have to upload till totalPartsCount. - if rErr == io.EOF { + if rerr == io.EOF { break } } @@ -248,7 +335,7 @@ func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName for i := 1; i < partNumber; i++ { part, ok := partsInfo[i] if !ok { - return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) } complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ ETag: part.ETag, @@ -258,10 +345,12 @@ func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil { - return totalUploadedSize, err + + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) + if err != nil { + return UploadInfo{}, err } - // Return final size. 
- return totalUploadedSize, nil + uploadInfo.Size = totalUploadedSize + return uploadInfo, nil } diff --git a/api-put-object_test.go b/api-put-object_test.go index d96abab9a..2867f3a19 100644 --- a/api-put-object_test.go +++ b/api-put-object_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/api-remove.go b/api-remove.go index f33df4dfc..6c2ab7802 100644 --- a/api-remove.go +++ b/api-remove.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,20 +25,20 @@ import ( "net/http" "net/url" - "github.com/minio/minio-go/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/s3utils" ) // RemoveBucket deletes the bucket name. // // All objects (including all object versions and delete markers). // in the bucket must be deleted before successfully attempting this request. -func (c Client) RemoveBucket(bucketName string) error { +func (c Client) RemoveBucket(ctx context.Context, bucketName string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } // Execute DELETE on bucket. - resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ bucketName: bucketName, contentSHA256Hex: emptySHA256Hex, }) @@ -58,8 +58,14 @@ func (c Client) RemoveBucket(bucketName string) error { return nil } -// RemoveObject remove an object from a bucket. 
-func (c Client) RemoveObject(bucketName, objectName string) error { +// RemoveObjectOptions represents options specified by user for RemoveObject call +type RemoveObjectOptions struct { + GovernanceBypass bool + VersionID string +} + +// RemoveObject removes an object from a bucket. +func (c Client) RemoveObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -67,11 +73,29 @@ func (c Client) RemoveObject(bucketName, objectName string) error { if err := s3utils.CheckValidObjectName(objectName); err != nil { return err } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + // Build headers. + headers := make(http.Header) + + if opts.GovernanceBypass { + // Set the bypass governance retention header + headers.Set(amzBypassGovernance, "true") + } // Execute DELETE on objectName. 
- resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ bucketName: bucketName, objectName: objectName, contentSHA256Hex: emptySHA256Hex, + queryValues: urlValues, + customHeader: headers, }) defer closeResponse(resp) if err != nil { @@ -93,22 +117,26 @@ func (c Client) RemoveObject(bucketName, objectName string) error { // RemoveObjectError - container of Multi Delete S3 API error type RemoveObjectError struct { ObjectName string + VersionID string Err error } // generateRemoveMultiObjects - generate the XML request for remove multi objects request -func generateRemoveMultiObjectsRequest(objects []string) []byte { - rmObjects := []deleteObject{} +func generateRemoveMultiObjectsRequest(objects []ObjectInfo) []byte { + delObjects := []deleteObject{} for _, obj := range objects { - rmObjects = append(rmObjects, deleteObject{Key: obj}) + delObjects = append(delObjects, deleteObject{ + Key: obj.Key, + VersionID: obj.VersionID, + }) } - xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: rmObjects, Quiet: true}) + xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: delObjects, Quiet: true}) return xmlBytes } // processRemoveMultiObjectsResponse - parse the remove multi objects web service // and return the success/failure result status for each object -func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh chan<- RemoveObjectError) { +func processRemoveMultiObjectsResponse(body io.Reader, objects []ObjectInfo, errorCh chan<- RemoveObjectError) { // Parse multi delete XML response rmResult := &deleteMultiObjectsResult{} err := xmlDecoder(body, rmResult) @@ -129,8 +157,15 @@ func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh } } -// RemoveObjectsWithContext - Identical to RemoveObjects call, but accepts context to facilitate request cancellation. 
-func (c Client) RemoveObjectsWithContext(ctx context.Context, bucketName string, objectsCh <-chan string) <-chan RemoveObjectError { +// RemoveObjectsOptions represents options specified by user for RemoveObjects call +type RemoveObjectsOptions struct { + GovernanceBypass bool +} + +// RemoveObjects removes multiple objects from a bucket while +// it is possible to specify objects versions which are received from +// objectsCh. Remove failures are sent back via error channel. +func (c Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectError { errorCh := make(chan RemoveObjectError, 1) // Validate if bucket name is valid. @@ -145,87 +180,94 @@ func (c Client) RemoveObjectsWithContext(ctx context.Context, bucketName string, if objectsCh == nil { defer close(errorCh) errorCh <- RemoveObjectError{ - Err: ErrInvalidArgument("Objects channel cannot be nil"), + Err: errInvalidArgument("Objects channel cannot be nil"), } return errorCh } - // Generate and call MultiDelete S3 requests based on entries received from objectsCh - go func(errorCh chan<- RemoveObjectError) { - maxEntries := 1000 - finish := false - urlValues := make(url.Values) - urlValues.Set("delete", "") + go c.removeObjects(ctx, bucketName, objectsCh, errorCh, opts) + return errorCh +} - // Close error channel when Multi delete finishes. 
- defer close(errorCh) +// Generate and call MultiDelete S3 requests based on entries received from objectsCh +func (c Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, errorCh chan<- RemoveObjectError, opts RemoveObjectsOptions) { + maxEntries := 1000 + finish := false + urlValues := make(url.Values) + urlValues.Set("delete", "") - // Loop over entries by 1000 and call MultiDelete requests - for { - if finish { - break - } - count := 0 - var batch []string - - // Try to gather 1000 entries - for object := range objectsCh { - batch = append(batch, object) - if count++; count >= maxEntries { - break - } - } - if count == 0 { - // Multi Objects Delete API doesn't accept empty object list, quit immediately + // Close error channel when Multi delete finishes. + defer close(errorCh) + + // Loop over entries by 1000 and call MultiDelete requests + for { + if finish { + break + } + count := 0 + var batch []ObjectInfo + + // Try to gather 1000 entries + for object := range objectsCh { + batch = append(batch, object) + if count++; count >= maxEntries { break } - if count < maxEntries { - // We didn't have 1000 entries, so this is the last batch - finish = true - } + } + if count == 0 { + // Multi Objects Delete API doesn't accept empty object list, quit immediately + break + } + if count < maxEntries { + // We didn't have 1000 entries, so this is the last batch + finish = true + } - // Generate remove multi objects XML request - removeBytes := generateRemoveMultiObjectsRequest(batch) - // Execute GET on bucket to list objects. 
- resp, err := c.executeMethod(ctx, "POST", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: bytes.NewReader(removeBytes), - contentLength: int64(len(removeBytes)), - contentMD5Base64: sumMD5Base64(removeBytes), - contentSHA256Hex: sum256Hex(removeBytes), - }) - if resp != nil { - if resp.StatusCode != http.StatusOK { - e := httpRespToErrorResponse(resp, bucketName, "") - errorCh <- RemoveObjectError{ObjectName: "", Err: e} - } + // Build headers. + headers := make(http.Header) + if opts.GovernanceBypass { + // Set the bypass governance retention header + headers.Set(amzBypassGovernance, "true") + } + + // Generate remove multi objects XML request + removeBytes := generateRemoveMultiObjectsRequest(batch) + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(removeBytes), + contentLength: int64(len(removeBytes)), + contentMD5Base64: sumMD5Base64(removeBytes), + contentSHA256Hex: sum256Hex(removeBytes), + customHeader: headers, + }) + if resp != nil { + if resp.StatusCode != http.StatusOK { + e := httpRespToErrorResponse(resp, bucketName, "") + errorCh <- RemoveObjectError{ObjectName: "", Err: e} } - if err != nil { - for _, b := range batch { - errorCh <- RemoveObjectError{ObjectName: b, Err: err} + } + if err != nil { + for _, b := range batch { + errorCh <- RemoveObjectError{ + ObjectName: b.Key, + VersionID: b.VersionID, + Err: err, } - continue } - - // Process multiobjects remove xml response - processRemoveMultiObjectsResponse(resp.Body, batch, errorCh) - - closeResponse(resp) + continue } - }(errorCh) - return errorCh -} -// RemoveObjects removes multiple objects from a bucket. -// The list of objects to remove are received from objectsCh. -// Remove failures are sent back via error channel. 
-func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError { - return c.RemoveObjectsWithContext(context.Background(), bucketName, objectsCh) + // Process multiobjects remove xml response + processRemoveMultiObjectsResponse(resp.Body, batch, errorCh) + + closeResponse(resp) + } } // RemoveIncompleteUpload aborts an partially uploaded object. -func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { +func (c Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -234,14 +276,14 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { return err } // Find multipart upload ids of the object to be aborted. - uploadIDs, err := c.findUploadIDs(bucketName, objectName) + uploadIDs, err := c.findUploadIDs(ctx, bucketName, objectName) if err != nil { return err } for _, uploadID := range uploadIDs { // abort incomplete multipart upload, based on the upload id passed. - err := c.abortMultipartUpload(context.Background(), bucketName, objectName, uploadID) + err := c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) if err != nil { return err } @@ -266,7 +308,7 @@ func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName urlValues.Set("uploadId", uploadID) // Execute DELETE on multipart upload. - resp, err := c.executeMethod(ctx, "DELETE", requestMetadata{ + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ bucketName: bucketName, objectName: objectName, queryValues: urlValues, diff --git a/api-s3-datatypes.go b/api-s3-datatypes.go index 8d8880c05..ac3445745 100644 --- a/api-s3-datatypes.go +++ b/api-s3-datatypes.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. 
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,6 +19,9 @@ package minio import ( "encoding/xml" + "errors" + "io" + "reflect" "time" ) @@ -71,6 +74,103 @@ type ListBucketV2Result struct { StartAfter string } +// Version is an element in the list object versions response +type Version struct { + ETag string + IsLatest bool + Key string + LastModified time.Time + Owner Owner + Size int64 + StorageClass string + VersionID string `xml:"VersionId"` + + isDeleteMarker bool +} + +// ListVersionsResult is an element in the list object versions response +type ListVersionsResult struct { + Versions []Version + + CommonPrefixes []CommonPrefix + Name string + Prefix string + Delimiter string + MaxKeys int64 + EncodingType string + IsTruncated bool + KeyMarker string + VersionIDMarker string + NextKeyMarker string + NextVersionIDMarker string +} + +// UnmarshalXML is a custom unmarshal code for the response of ListObjectVersions, the custom +// code will unmarshal <Version> and <DeleteMarker> tags and save them in Versions field to +// preserve the lexical order of the listing. +func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) { + for { + // Read tokens from the XML document in a stream. 
+ t, err := d.Token() + if err != nil { + if err == io.EOF { + break + } + return err + } + + switch se := t.(type) { + case xml.StartElement: + tagName := se.Name.Local + switch tagName { + case "Name", "Prefix", + "Delimiter", "EncodingType", + "KeyMarker", "VersionIdMarker", + "NextKeyMarker", "NextVersionIdMarker": + var s string + if err = d.DecodeElement(&s, &se); err != nil { + return err + } + v := reflect.ValueOf(l).Elem().FieldByName(tagName) + if v.IsValid() { + v.SetString(s) + } + case "IsTruncated": // bool + var b bool + if err = d.DecodeElement(&b, &se); err != nil { + return err + } + l.IsTruncated = b + case "MaxKeys": // int64 + var i int64 + if err = d.DecodeElement(&i, &se); err != nil { + return err + } + l.MaxKeys = i + case "CommonPrefixes": + var cp CommonPrefix + if err = d.DecodeElement(&cp, &se); err != nil { + return err + } + l.CommonPrefixes = append(l.CommonPrefixes, cp) + case "DeleteMarker", "Version": + var v Version + if err = d.DecodeElement(&v, &se); err != nil { + return err + } + if tagName == "DeleteMarker" { + v.isDeleteMarker = true + } + l.Versions = append(l.Versions, v) + default: + return errors.New("unrecognized option:" + tagName) + } + + } + } + return nil +} + // ListBucketResult container for listObjects response. type ListBucketResult struct { // A response can contain CommonPrefixes only if you have diff --git a/api-select.go b/api-select.go index 10e1d47d6..e35cf02bf 100644 --- a/api-select.go +++ b/api-select.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2018 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2018-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -31,8 +31,8 @@ import ( "net/url" "strings" - "github.com/minio/minio-go/pkg/encrypt" - "github.com/minio/minio-go/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/s3utils" ) // CSVFileHeaderInfo - is the parameter for whether to utilize headers. @@ -88,31 +88,244 @@ type ParquetInputOptions struct{} // CSVInputOptions csv input specific options type CSVInputOptions struct { - FileHeaderInfo CSVFileHeaderInfo - RecordDelimiter string - FieldDelimiter string - QuoteCharacter string - QuoteEscapeCharacter string - Comments string + FileHeaderInfo CSVFileHeaderInfo + fileHeaderInfoSet bool + + RecordDelimiter string + recordDelimiterSet bool + + FieldDelimiter string + fieldDelimiterSet bool + + QuoteCharacter string + quoteCharacterSet bool + + QuoteEscapeCharacter string + quoteEscapeCharacterSet bool + + Comments string + commentsSet bool +} + +// SetFileHeaderInfo sets the file header info in the CSV input options +func (c *CSVInputOptions) SetFileHeaderInfo(val CSVFileHeaderInfo) { + c.FileHeaderInfo = val + c.fileHeaderInfoSet = true +} + +// SetRecordDelimiter sets the record delimiter in the CSV input options +func (c *CSVInputOptions) SetRecordDelimiter(val string) { + c.RecordDelimiter = val + c.recordDelimiterSet = true +} + +// SetFieldDelimiter sets the field delimiter in the CSV input options +func (c *CSVInputOptions) SetFieldDelimiter(val string) { + c.FieldDelimiter = val + c.fieldDelimiterSet = true +} + +// SetQuoteCharacter sets the quote character in the CSV input options +func (c *CSVInputOptions) SetQuoteCharacter(val string) { + c.QuoteCharacter = val + c.quoteCharacterSet = true +} + +// SetQuoteEscapeCharacter sets the quote escape character in the CSV input options +func (c *CSVInputOptions) SetQuoteEscapeCharacter(val string) { + c.QuoteEscapeCharacter = val + c.quoteEscapeCharacterSet = true +} + +// SetComments sets the comments character in the CSV input options +func (c *CSVInputOptions) 
SetComments(val string) { + c.Comments = val + c.commentsSet = true +} + +// MarshalXML - produces the xml representation of the CSV input options struct +func (c CSVInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + if c.FileHeaderInfo != "" || c.fileHeaderInfoSet { + if err := e.EncodeElement(c.FileHeaderInfo, xml.StartElement{Name: xml.Name{Local: "FileHeaderInfo"}}); err != nil { + return err + } + } + + if c.RecordDelimiter != "" || c.recordDelimiterSet { + if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { + return err + } + } + + if c.FieldDelimiter != "" || c.fieldDelimiterSet { + if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil { + return err + } + } + + if c.QuoteCharacter != "" || c.quoteCharacterSet { + if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil { + return err + } + } + + if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet { + if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil { + return err + } + } + + if c.Comments != "" || c.commentsSet { + if err := e.EncodeElement(c.Comments, xml.StartElement{Name: xml.Name{Local: "Comments"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) } // CSVOutputOptions csv output specific options type CSVOutputOptions struct { - QuoteFields CSVQuoteFields - RecordDelimiter string - FieldDelimiter string - QuoteCharacter string - QuoteEscapeCharacter string + QuoteFields CSVQuoteFields + quoteFieldsSet bool + + RecordDelimiter string + recordDelimiterSet bool + + FieldDelimiter string + fieldDelimiterSet bool + + QuoteCharacter string + quoteCharacterSet bool + + QuoteEscapeCharacter string + 
quoteEscapeCharacterSet bool +} + +// SetQuoteFields sets the quote field parameter in the CSV output options +func (c *CSVOutputOptions) SetQuoteFields(val CSVQuoteFields) { + c.QuoteFields = val + c.quoteFieldsSet = true +} + +// SetRecordDelimiter sets the record delimiter character in the CSV output options +func (c *CSVOutputOptions) SetRecordDelimiter(val string) { + c.RecordDelimiter = val + c.recordDelimiterSet = true +} + +// SetFieldDelimiter sets the field delimiter character in the CSV output options +func (c *CSVOutputOptions) SetFieldDelimiter(val string) { + c.FieldDelimiter = val + c.fieldDelimiterSet = true +} + +// SetQuoteCharacter sets the quote character in the CSV output options +func (c *CSVOutputOptions) SetQuoteCharacter(val string) { + c.QuoteCharacter = val + c.quoteCharacterSet = true +} + +// SetQuoteEscapeCharacter sets the quote escape character in the CSV output options +func (c *CSVOutputOptions) SetQuoteEscapeCharacter(val string) { + c.QuoteEscapeCharacter = val + c.quoteEscapeCharacterSet = true +} + +// MarshalXML - produces the xml representation of the CSVOutputOptions struct +func (c CSVOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + + if c.QuoteFields != "" || c.quoteFieldsSet { + if err := e.EncodeElement(c.QuoteFields, xml.StartElement{Name: xml.Name{Local: "QuoteFields"}}); err != nil { + return err + } + } + + if c.RecordDelimiter != "" || c.recordDelimiterSet { + if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { + return err + } + } + + if c.FieldDelimiter != "" || c.fieldDelimiterSet { + if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil { + return err + } + } + + if c.QuoteCharacter != "" || c.quoteCharacterSet { + if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: 
"QuoteCharacter"}}); err != nil { + return err + } + } + + if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet { + if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) } // JSONInputOptions json input specific options type JSONInputOptions struct { - Type JSONType + Type JSONType + typeSet bool +} + +// SetType sets the JSON type in the JSON input options +func (j *JSONInputOptions) SetType(typ JSONType) { + j.Type = typ + j.typeSet = true +} + +// MarshalXML - produces the xml representation of the JSONInputOptions struct +func (j JSONInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + + if j.Type != "" || j.typeSet { + if err := e.EncodeElement(j.Type, xml.StartElement{Name: xml.Name{Local: "Type"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) } // JSONOutputOptions - json output specific options type JSONOutputOptions struct { - RecordDelimiter string + RecordDelimiter string + recordDelimiterSet bool +} + +// SetRecordDelimiter sets the record delimiter in the JSON output options +func (j *JSONOutputOptions) SetRecordDelimiter(val string) { + j.RecordDelimiter = val + j.recordDelimiterSet = true +} + +// MarshalXML - produces the xml representation of the JSONOutputOptions struct +func (j JSONOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + + if j.RecordDelimiter != "" || j.recordDelimiterSet { + if err := e.EncodeElement(j.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) } // SelectObjectInputSerialization - input serialization parameters @@ -237,7 +450,7 
@@ func (c Client) SelectObjectContent(ctx context.Context, bucketName, objectName urlValues.Set("select-type", "2") // Execute POST on bucket/object. - resp, err := c.executeMethod(ctx, "POST", requestMetadata{ + resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ bucketName: bucketName, objectName: objectName, queryValues: urlValues, @@ -251,6 +464,12 @@ func (c Client) SelectObjectContent(ctx context.Context, bucketName, objectName return nil, err } + return NewSelectResults(resp, bucketName) +} + +// NewSelectResults creates a Select Result parser that parses the response +// and returns a Reader that will return parsed and assembled select output. +func NewSelectResults(resp *http.Response, bucketName string) (*SelectResults, error) { if resp.StatusCode != http.StatusOK { return nil, httpRespToErrorResponse(resp, bucketName, "") } @@ -325,7 +544,7 @@ func (s *SelectResults) start(pipeWriter *io.PipeWriter) { switch m { case errorMsg: - pipeWriter.CloseWithError(errors.New("Error Type of " + headers.Get("error-type") + " " + headers.Get("error-message"))) + pipeWriter.CloseWithError(errors.New(headers.Get("error-code") + ":\"" + headers.Get("error-message") + "\"")) closeResponse(s.resp) return case commonMsg: @@ -490,7 +709,7 @@ func extractString(source io.Reader, lenBytes int) (string, error) { // extractUint32 extracts a 4 byte integer from the byte array. func extractUint32(r io.Reader) (uint32, error) { buf := make([]byte, 4) - _, err := io.ReadFull(r, buf) + _, err := readFull(r, buf) if err != nil { return 0, err } @@ -500,7 +719,7 @@ func extractUint32(r io.Reader) (uint32, error) { // extractUint16 extracts a 2 byte integer from the byte array. 
func extractUint16(r io.Reader) (uint16, error) { buf := make([]byte, 2) - _, err := io.ReadFull(r, buf) + _, err := readFull(r, buf) if err != nil { return 0, err } @@ -510,7 +729,7 @@ func extractUint16(r io.Reader) (uint16, error) { // extractUint8 extracts a 1 byte integer from the byte array. func extractUint8(r io.Reader) (uint8, error) { buf := make([]byte, 1) - _, err := io.ReadFull(r, buf) + _, err := readFull(r, buf) if err != nil { return 0, err } diff --git a/api-stat.go b/api-stat.go index 91e9d3964..ea9c30970 100644 --- a/api-stat.go +++ b/api-stat.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,22 +20,21 @@ package minio import ( "context" "net/http" - "strconv" - "strings" - "time" + "net/url" - "github.com/minio/minio-go/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/s3utils" ) -// BucketExists verify if bucket exists and you have permission to access it. -func (c Client) BucketExists(bucketName string) (bool, error) { +// BucketExists verifies if bucket exists and you have permission to access it. Allows for a Context to +// control cancellations and timeouts. +func (c Client) BucketExists(ctx context.Context, bucketName string) (bool, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return false, err } // Execute HEAD on bucketName. 
- resp, err := c.executeMethod(context.Background(), "HEAD", requestMetadata{ + resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{ bucketName: bucketName, contentSHA256Hex: emptySHA256Hex, }) @@ -58,38 +57,8 @@ func (c Client) BucketExists(bucketName string) (bool, error) { return true, nil } -// List of header keys to be filtered, usually -// from all S3 API http responses. -var defaultFilterKeys = []string{ - "Connection", - "Transfer-Encoding", - "Accept-Ranges", - "Date", - "Server", - "Vary", - "x-amz-bucket-region", - "x-amz-request-id", - "x-amz-id-2", - "Content-Security-Policy", - "X-Xss-Protection", - - // Add new headers to be ignored. -} - -// Extract only necessary metadata header key/values by -// filtering them out with a list of custom header keys. -func extractObjMetadata(header http.Header) http.Header { - filterKeys := append([]string{ - "ETag", - "Content-Length", - "Last-Modified", - "Content-Type", - }, defaultFilterKeys...) - return filterHeader(header, filterKeys) -} - // StatObject verifies if object exists and you have permission to access. -func (c Client) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { +func (c Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectInfo{}, err @@ -97,7 +66,7 @@ func (c Client) StatObject(bucketName, objectName string, opts StatObjectOptions if err := s3utils.CheckValidObjectName(objectName); err != nil { return ObjectInfo{}, err } - return c.statObject(context.Background(), bucketName, objectName, opts) + return c.statObject(ctx, bucketName, objectName, opts) } // Lower level API for statObject supporting pre-conditions and range headers. 
@@ -110,10 +79,16 @@ func (c Client) statObject(ctx context.Context, bucketName, objectName string, o return ObjectInfo{}, err } + urlValues := make(url.Values) + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + // Execute HEAD on objectName. - resp, err := c.executeMethod(ctx, "HEAD", requestMetadata{ + resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{ bucketName: bucketName, objectName: objectName, + queryValues: urlValues, contentSHA256Hex: emptySHA256Hex, customHeader: opts.Header(), }) @@ -127,59 +102,5 @@ func (c Client) statObject(ctx context.Context, bucketName, objectName string, o } } - // Trim off the odd double quotes from ETag in the beginning and end. - md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"") - md5sum = strings.TrimSuffix(md5sum, "\"") - - // Parse content length is exists - var size int64 = -1 - contentLengthStr := resp.Header.Get("Content-Length") - if contentLengthStr != "" { - size, err = strconv.ParseInt(contentLengthStr, 10, 64) - if err != nil { - // Content-Length is not valid - return ObjectInfo{}, ErrorResponse{ - Code: "InternalError", - Message: "Content-Length is invalid. " + reportIssue, - BucketName: bucketName, - Key: objectName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - Region: resp.Header.Get("x-amz-bucket-region"), - } - } - } - - // Parse Last-Modified has http time format. - date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) - if err != nil { - return ObjectInfo{}, ErrorResponse{ - Code: "InternalError", - Message: "Last-Modified time format is invalid. " + reportIssue, - BucketName: bucketName, - Key: objectName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - Region: resp.Header.Get("x-amz-bucket-region"), - } - } - - // Fetch content type if any present. 
- contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) - if contentType == "" { - contentType = "application/octet-stream" - } - - // Save object metadata info. - return ObjectInfo{ - ETag: md5sum, - Key: objectName, - Size: size, - LastModified: date, - ContentType: contentType, - // Extract only the relevant header keys describing the object. - // following function filters out a list of standard set of keys - // which are not part of object metadata. - Metadata: extractObjMetadata(resp.Header), - }, nil + return ToObjectInfo(bucketName, objectName, resp.Header) } diff --git a/api.go b/api.go index 78057e0a2..1805e3800 100644 --- a/api.go +++ b/api.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2018 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2018 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,11 +20,8 @@ package minio import ( "bytes" "context" - "crypto/md5" - "crypto/sha256" "errors" "fmt" - "hash" "io" "io/ioutil" "math/rand" @@ -39,11 +36,11 @@ import ( "sync" "time" + md5simd "github.com/minio/md5-simd" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/signer" "golang.org/x/net/publicsuffix" - - "github.com/minio/minio-go/pkg/credentials" - "github.com/minio/minio-go/pkg/s3signer" - "github.com/minio/minio-go/pkg/s3utils" ) // Client implements Amazon S3 compatible methods. @@ -73,8 +70,9 @@ type Client struct { bucketLocCache *bucketLocationCache // Advanced functionality. - isTraceEnabled bool - traceOutput io.Writer + isTraceEnabled bool + traceErrorsOnly bool + traceOutput io.Writer // S3 specific accelerated endpoint. s3AccelerateEndpoint string @@ -88,29 +86,37 @@ type Client struct { // lookup indicates type of url lookup supported by server. 
If not specified, // default to Auto. lookup BucketLookupType + + // Factory for MD5 hash functions. + md5Hasher func() md5simd.Hasher + sha256Hasher func() md5simd.Hasher } // Options for New method type Options struct { Creds *credentials.Credentials Secure bool + Transport http.RoundTripper Region string BucketLookup BucketLookupType - // Add future fields here + + // Custom hash routines. Leave nil to use standard. + CustomMD5 func() md5simd.Hasher + CustomSHA256 func() md5simd.Hasher } // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v6.0.17" + libraryVersion = "v7.0.4" ) // User Agent should always following the below style. // Please open an issue to discuss any new changes here. // -// Minio (OS; ARCH) LIB/VER APP/VER +// MinIO (OS; ARCH) LIB/VER APP/VER const ( - libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") " + libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") " libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion ) @@ -124,34 +130,12 @@ const ( BucketLookupPath ) -// NewV2 - instantiate minio client with Amazon S3 signature version -// '2' compatibility. -func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { - creds := credentials.NewStaticV2(accessKeyID, secretAccessKey, "") - clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) - if err != nil { - return nil, err - } - clnt.overrideSignerType = credentials.SignatureV2 - return clnt, nil -} - -// NewV4 - instantiate minio client with Amazon S3 signature version -// '4' compatibility. 
-func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { - creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") - clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) - if err != nil { - return nil, err +// New - instantiate minio client with options +func New(endpoint string, opts *Options) (*Client, error) { + if opts == nil { + return nil, errors.New("no options provided") } - clnt.overrideSignerType = credentials.SignatureV4 - return clnt, nil -} - -// New - instantiate minio client, adds automatic verification of signature. -func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { - creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") - clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) + clnt, err := privateNew(endpoint, opts) if err != nil { return nil, err } @@ -163,27 +147,14 @@ func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, e if s3utils.IsAmazonEndpoint(*clnt.endpointURL) { clnt.overrideSignerType = credentials.SignatureV4 } - return clnt, nil -} - -// NewWithCredentials - instantiate minio client with credentials provider -// for retrieving credentials from various credentials provider such as -// IAM, File, Env etc. -func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) { - return privateNew(endpoint, creds, secure, region, BucketLookupAuto) -} -// NewWithRegion - instantiate minio client, with region configured. Unlike New(), -// NewWithRegion avoids bucket-location lookup operations and it is slightly faster. -// Use this function when if your application deals with single region. 
-func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) { - creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") - return privateNew(endpoint, creds, secure, region, BucketLookupAuto) + return clnt, nil } -// NewWithOptions - instantiate minio client with options -func NewWithOptions(endpoint string, opts *Options) (*Client, error) { - return privateNew(endpoint, opts.Creds, opts.Secure, opts.Region, opts.BucketLookup) +// EndpointURL returns the URL of the S3 endpoint. +func (c *Client) EndpointURL() *url.URL { + endpoint := *c.endpointURL // copy to prevent callers from modifying internal state + return &endpoint } // UseHostPicker makes the client ignore endpointUrl instead uses the hostPicker passed in @@ -291,15 +262,15 @@ func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error { case signerType.IsV2(): return errors.New("signature V2 cannot support redirection") case signerType.IsV4(): - req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region)) + signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region)) } } return nil } -func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string, lookup BucketLookupType) (*Client, error) { +func privateNew(endpoint string, opts *Options) (*Client, error) { // construct endpoint. - endpointURL, err := getEndpointURL(endpoint, secure) + endpointURL, err := getEndpointURL(endpoint, opts.Secure) if err != nil { return nil, err } @@ -315,26 +286,34 @@ func privateNew(endpoint string, creds *credentials.Credentials, secure bool, re clnt := new(Client) // Save the credentials. - clnt.credsProvider = creds + clnt.credsProvider = opts.Creds // Remember whether we are using https or not - clnt.secure = secure + clnt.secure = opts.Secure // Save endpoint URL, user agent for future uses. 
clnt.endpointURL = endpointURL + transport := opts.Transport + if transport == nil { + transport, err = DefaultTransport(opts.Secure) + if err != nil { + return nil, err + } + } + // Instantiate http client and bucket location cache. clnt.httpClient = &http.Client{ Jar: jar, - Transport: DefaultTransport, + Transport: transport, CheckRedirect: clnt.redirectHeaders, } // Sets custom region, if region is empty bucket location cache is used automatically. - if region == "" { - region = s3utils.GetRegionFromURL(*clnt.endpointURL) + if opts.Region == "" { + opts.Region = s3utils.GetRegionFromURL(*clnt.endpointURL) } - clnt.region = region + clnt.region = opts.Region // Instantiate bucket location cache. clnt.bucketLocCache = newBucketLocationCache() @@ -342,9 +321,18 @@ func privateNew(endpoint string, creds *credentials.Credentials, secure bool, re // Introduce a new locked random seed. clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())}) + // Add default md5 hasher. + clnt.md5Hasher = opts.CustomMD5 + clnt.sha256Hasher = opts.CustomSHA256 + if clnt.md5Hasher == nil { + clnt.md5Hasher = newMd5Hasher + } + if clnt.sha256Hasher == nil { + clnt.sha256Hasher = newSHA256Hasher + } // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints. - clnt.lookup = lookup + clnt.lookup = opts.BucketLookup // Return. return clnt, nil } @@ -353,36 +341,11 @@ func privateNew(endpoint string, creds *credentials.Credentials, secure bool, re func (c *Client) SetAppInfo(appName string, appVersion string) { // if app name and version not set, we do not set a new user agent. 
if appName != "" && appVersion != "" { - c.appInfo = struct { - appName string - appVersion string - }{} c.appInfo.appName = appName c.appInfo.appVersion = appVersion } } -// SetCustomTransport - set new custom transport. -func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) { - // Set this to override default transport - // ``http.DefaultTransport``. - // - // This transport is usually needed for debugging OR to add your - // own custom TLS certificates on the client transport, for custom - // CA's and certs which are not part of standard certificate - // authority follow this example :- - // - // tr := &http.Transport{ - // TLSClientConfig: &tls.Config{RootCAs: pool}, - // DisableCompression: true, - // } - // api.SetCustomTransport(tr) - // - if c.httpClient != nil { - c.httpClient.Transport = customHTTPTransport - } -} - // TraceOn - enable HTTP tracing. func (c *Client) TraceOn(outputStream io.Writer) { // if outputStream is nil then default to os.Stdout. @@ -396,10 +359,23 @@ func (c *Client) TraceOn(outputStream io.Writer) { c.isTraceEnabled = true } +// TraceErrorsOnlyOn - same as TraceOn, but only errors will be traced. +func (c *Client) TraceErrorsOnlyOn(outputStream io.Writer) { + c.TraceOn(outputStream) + c.traceErrorsOnly = true +} + +// TraceErrorsOnlyOff - Turns off the errors only tracing and everything will be traced after this call. +// If all tracing needs to be turned off, call TraceOff(). +func (c *Client) TraceErrorsOnlyOff() { + c.traceErrorsOnly = false +} + // TraceOff - disable HTTP tracing. func (c *Client) TraceOff() { // Disable tracing. c.isTraceEnabled = false + c.traceErrorsOnly = false } // SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your @@ -419,20 +395,23 @@ func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) { // - For signature v4 request if the connection is insecure compute only sha256. 
// - For signature v4 request if the connection is secure compute only md5. // - For anonymous request compute md5. -func (c *Client) hashMaterials() (hashAlgos map[string]hash.Hash, hashSums map[string][]byte) { +func (c *Client) hashMaterials(isMd5Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) { hashSums = make(map[string][]byte) - hashAlgos = make(map[string]hash.Hash) + hashAlgos = make(map[string]md5simd.Hasher) if c.overrideSignerType.IsV4() { if c.secure { - hashAlgos["md5"] = md5.New() + hashAlgos["md5"] = c.md5Hasher() } else { - hashAlgos["sha256"] = sha256.New() + hashAlgos["sha256"] = c.sha256Hasher() } } else { if c.overrideSignerType.IsAnonymous() { - hashAlgos["md5"] = md5.New() + hashAlgos["md5"] = c.md5Hasher() } } + if isMd5Requested { + hashAlgos["md5"] = c.md5Hasher() + } return hashAlgos, hashSums } @@ -536,11 +515,12 @@ func (c Client) do(req *http.Request) (*http.Response, error) { // Response cannot be non-nil, report error if thats the case. if resp == nil { msg := "Response is empty. " + reportIssue - return nil, ErrInvalidArgument(msg) + return nil, errInvalidArgument(msg) } - // If trace is enabled, dump http request and response. - if c.isTraceEnabled { + // If trace is enabled, dump http request and response, + // except when the traceErrorsOnly enabled and the response's status code is ok + if c.isTraceEnabled && !(c.traceErrorsOnly && resp.StatusCode == http.StatusOK) { err = c.dumpHTTP(req, resp) if err != nil { return nil, err @@ -561,19 +541,26 @@ var successStatus = []int{ // request upon any error up to maxRetries attempts in a binomially // delayed manner using a standard back off algorithm. func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) { - var isRetryable bool // Indicates if request can be retried. + var retryable bool // Indicates if request can be retried. 
var bodySeeker io.Seeker // Extracted seeker from io.Reader. var reqRetry = MaxRetry // Indicates how many times we can retry the request + defer func() { + if err != nil { + // close idle connections before returning, upon error. + c.httpClient.CloseIdleConnections() + } + }() + if metadata.contentBody != nil { // Check if body is seekable then it is retryable. - bodySeeker, isRetryable = metadata.contentBody.(io.Seeker) + bodySeeker, retryable = metadata.contentBody.(io.Seeker) switch bodySeeker { case os.Stdin, os.Stdout, os.Stderr: - isRetryable = false + retryable = false } // Retry only when reader is seekable - if !isRetryable { + if !retryable { reqRetry = 1 } @@ -586,21 +573,21 @@ func (c Client) executeMethod(ctx context.Context, method string, metadata reque } } - // Create a done channel to control 'newRetryTimer' go routine. - doneCh := make(chan struct{}, 1) + // Create cancel context to control 'newRetryTimer' go routine. + retryCtx, cancel := context.WithCancel(ctx) // Indicate to our routine to exit cleanly upon return. - defer close(doneCh) + defer cancel() // Blank indentifier is kept here on purpose since 'range' without // blank identifiers is only supported since go1.4 // https://golang.org/doc/go1.4#forrange. - for range c.newRetryTimer(reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) { + for range c.newRetryTimer(retryCtx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) { // Retry executes the following function body if request has an // error until maxRetries have been exhausted, retry attempts are // performed after waiting for a given period of time in a // binomial fashion. - if isRetryable { + if retryable { // Seek back to beginning for each attempt. if _, err = bodySeeker.Seek(0, 0); err != nil { // If seek failed, no need to retry. @@ -610,7 +597,7 @@ func (c Client) executeMethod(ctx context.Context, method string, metadata reque // Instantiate a new request. 
var req *http.Request - req, err = c.newRequest(method, metadata) + req, err = c.newRequest(ctx, method, metadata) if err != nil { errResponse := ToErrorResponse(err) if isS3CodeRetryable(errResponse.Code) { @@ -625,12 +612,10 @@ func (c Client) executeMethod(ctx context.Context, method string, metadata reque // Initiate the request. res, err = c.do(req) if err != nil { - // For supported http requests errors verify. - if isHTTPReqErrorRetryable(err) { - continue // Retry. + if err == context.Canceled || err == context.DeadlineExceeded { + return nil, err } - // For other errors, return here no need to retry. - return nil, err + continue } // For any known successful http status, return quickly. @@ -665,14 +650,33 @@ func (c Client) executeMethod(ctx context.Context, method string, metadata reque // // Additionally we should only retry if bucketLocation and custom // region is empty. - if metadata.bucketLocation == "" && c.region == "" { - if errResponse.Code == "AuthorizationHeaderMalformed" || errResponse.Code == "InvalidRegion" { - if metadata.bucketName != "" && errResponse.Region != "" { + if c.region == "" { + switch errResponse.Code { + case "AuthorizationHeaderMalformed": + fallthrough + case "InvalidRegion": + fallthrough + case "AccessDenied": + if errResponse.Region == "" { + // Region is empty we simply return the error. + return res, err + } + // Region is not empty figure out a way to + // handle this appropriately. + if metadata.bucketName != "" { // Gather Cached location only if bucketName is present. - if _, cachedLocationError := c.bucketLocCache.Get(metadata.bucketName); cachedLocationError != false { + if location, cachedOk := c.bucketLocCache.Get(metadata.bucketName); cachedOk && location != errResponse.Region { c.bucketLocCache.Set(metadata.bucketName, errResponse.Region) continue // Retry. } + } else { + // This is for ListBuckets() fallback. 
+ if errResponse.Region != metadata.bucketLocation { + // Retry if the error response has a different region + // than the request we just made. + metadata.bucketLocation = errResponse.Region + continue // Retry + } } } } @@ -690,29 +694,30 @@ func (c Client) executeMethod(ctx context.Context, method string, metadata reque // For all other cases break out of the retry loop. break } + + // Return an error when retry is canceled or deadlined + if e := retryCtx.Err(); e != nil { + return nil, e + } + return res, err } // newRequest - instantiate a new HTTP request for a given method. -func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) { +func (c Client) newRequest(ctx context.Context, method string, metadata requestMetadata) (req *http.Request, err error) { // If no method is supplied default to 'POST'. if method == "" { - method = "POST" + method = http.MethodPost } location := metadata.bucketLocation if location == "" { if metadata.bucketName != "" { // Gather location only if bucketName is present. - location, err = c.getBucketLocation(metadata.bucketName) + location, err = c.getBucketLocation(ctx, metadata.bucketName) if err != nil { - if ToErrorResponse(err).Code != "AccessDenied" { - return nil, err - } + return nil, err } - // Upon AccessDenied error on fetching bucket location, default - // to possible locations based on endpoint URL. This can usually - // happen when GetBucketLocation() is disabled using IAM policies. } if location == "" { location = getDefaultLocation(*c.endpointURL, c.region) @@ -720,10 +725,14 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R } // Look if target url supports virtual host. - isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) + // We explicitly disallow MakeBucket calls to not use virtual DNS style, + // since the resolution may fail. 
+ isMakeBucket := (metadata.objectName == "" && method == http.MethodPut && len(metadata.queryValues) == 0) + isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) && !isMakeBucket // Construct a new target URL. - targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, isVirtualHost, metadata.queryValues) + targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, + isVirtualHost, metadata.queryValues) if err != nil { return nil, err } @@ -761,14 +770,14 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R // Generate presign url if needed, return right here. if metadata.expires != 0 && metadata.presignURL { if signerType.IsAnonymous() { - return nil, ErrInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.") + return nil, errInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.") } if signerType.IsV2() { // Presign URL with signature v2. - req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost) + req = signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost) } else if signerType.IsV4() { // Presign URL with signature v4. - req = s3signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires) + req = signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires) } return req, nil } @@ -811,12 +820,12 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R switch { case signerType.IsV2(): // Add signature version '2' authorization header. 
- req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) - case metadata.objectName != "" && method == "PUT" && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure: + req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) + case metadata.objectName != "" && metadata.queryValues == nil && method == http.MethodPut && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure: // Streaming signature is used by default for a PUT object request. Additionally we also // look if the initialized client is secure, if yes then we don't need to perform // streaming signature. - req = s3signer.StreamingSignV4(req, accessKeyID, + req = signer.StreamingSignV4(req, accessKeyID, secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC()) default: // Set sha256 sum for signature calculation only with signature version '4'. @@ -827,7 +836,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R req.Header.Set("X-Amz-Content-Sha256", shaHeader) // Add signature version '4' authorization header. - req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location) + req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location) } // Return request. @@ -851,7 +860,7 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isV // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html // Disable transfer acceleration for non-compliant bucket names. if strings.Contains(bucketName, ".") { - return nil, ErrTransferAccelerationBucket(bucketName) + return nil, errTransferAccelerationBucket(bucketName) } // If transfer acceleration is requested set new host. // For more details about enabling transfer acceleration read here. 
diff --git a/api_unit_test.go b/api_unit_test.go index 35b01797e..fb6977d8b 100644 --- a/api_unit_test.go +++ b/api_unit_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,24 +18,13 @@ package minio import ( - "net/http" "net/url" "testing" - "github.com/minio/minio-go/pkg/credentials" - "github.com/minio/minio-go/pkg/policy" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/policy" ) -type customReader struct{} - -func (c *customReader) Read(p []byte) (n int, err error) { - return 0, nil -} - -func (c *customReader) Size() (n int64) { - return 10 -} - // Tests valid hosts for location. func TestValidBucketLocation(t *testing.T) { s3Hosts := []struct { @@ -65,14 +54,8 @@ func TestErrorResponse(t *testing.T) { t.Fatal("Type conversion failed, we have an empty struct.") } - // Test http response decoding. - var httpResponse *http.Response - // Set empty variables - httpResponse = nil - var bucketName, objectName string - // Should fail with invalid argument. - err = httpRespToErrorResponse(httpResponse, bucketName, objectName) + err = httpRespToErrorResponse(nil, "", "") errResp = ToErrorResponse(err) if errResp.Code != "InvalidArgument" { t.Fatal("Empty response input should return invalid argument.") @@ -116,42 +99,70 @@ func TestBucketPolicyTypes(t *testing.T) { // Tests optimal part size. 
func TestPartSize(t *testing.T) { - _, _, _, err := optimalPartInfo(5000000000000000000) + _, _, _, err := optimalPartInfo(5000000000000000000, minPartSize) if err == nil { t.Fatal("Error: should fail") } - totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(5497558138880) + totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(5243928576, 5*1024*1024) if err != nil { t.Fatal("Error: ", err) } - if totalPartsCount != 9103 { - t.Fatalf("Error: expecting total parts count of 9987: got %v instead", totalPartsCount) + if totalPartsCount != 1001 { + t.Fatalf("Error: expecting total parts count of 1001: got %v instead", totalPartsCount) } - if partSize != 603979776 { - t.Fatalf("Error: expecting part size of 550502400: got %v instead", partSize) + if partSize != 5242880 { + t.Fatalf("Error: expecting part size of 5242880: got %v instead", partSize) } - if lastPartSize != 134217728 { - t.Fatalf("Error: expecting last part size of 241172480: got %v instead", lastPartSize) + if lastPartSize != 1048576 { + t.Fatalf("Error: expecting last part size of 1048576: got %v instead", lastPartSize) } - _, partSize, _, err = optimalPartInfo(5000000000) + totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(5243928576, 0) + if err != nil { + t.Fatal("Error: ", err) + } + if totalPartsCount != 40 { + t.Fatalf("Error: expecting total parts count of 40: got %v instead", totalPartsCount) + } + if partSize != 134217728 { + t.Fatalf("Error: expecting part size of 134217728: got %v instead", partSize) + } + if lastPartSize != 9437184 { + t.Fatalf("Error: expecting last part size of 9437184: got %v instead", lastPartSize) + } + _, partSize, _, err = optimalPartInfo(5000000000, minPartSize) if err != nil { t.Fatal("Error:", err) } if partSize != minPartSize { t.Fatalf("Error: expecting part size of %v: got %v instead", minPartSize, partSize) } - totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(-1) + // if stream and using default optimal part size 
determined by sdk + totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(-1, 0) + if err != nil { + t.Fatal("Error:", err) + } + if totalPartsCount != 8192 { + t.Fatalf("Error: expecting total parts count of 8192: got %v instead", totalPartsCount) + } + if partSize != 671088640 { + t.Fatalf("Error: expecting part size of 671088640: got %v instead", partSize) + } + if lastPartSize != 671088640 { + t.Fatalf("Error: expecting last part size of 671088640: got %v instead", lastPartSize) + } + + totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(-1, 64*1024*1024) if err != nil { t.Fatal("Error:", err) } - if totalPartsCount != 9103 { - t.Fatalf("Error: expecting total parts count of 9987: got %v instead", totalPartsCount) + if totalPartsCount != 10000 { + t.Fatalf("Error: expecting total parts count of 10000: got %v instead", totalPartsCount) } - if partSize != 603979776 { - t.Fatalf("Error: expecting part size of 550502400: got %v instead", partSize) + if partSize != 67108864 { + t.Fatalf("Error: expecting part size of 67108864: got %v instead", partSize) } - if lastPartSize != 134217728 { - t.Fatalf("Error: expecting last part size of 241172480: got %v instead", lastPartSize) + if lastPartSize != 67108864 { + t.Fatalf("Error: expecting part size of 67108864: got %v instead", lastPartSize) } } @@ -188,8 +199,11 @@ func TestMakeTargetURL(t *testing.T) { } for i, testCase := range testCases { - // Initialize a Minio client - c, _ := New(testCase.addr, "foo", "bar", testCase.secure) + // Initialize a MinIO client + c, _ := New(testCase.addr, &Options{ + Creds: credentials.NewStaticV4("foo", "bar", ""), + Secure: testCase.secure, + }) isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, testCase.bucketName) u, err := c.makeTargetURL(testCase.bucketName, testCase.objectName, testCase.bucketLocation, isVirtualHost, testCase.queryValues) // Check the returned error diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 
48ea6e77d..000000000 --- a/appveyor.yml +++ /dev/null @@ -1,39 +0,0 @@ -# version format -version: "{build}" - -# Operating system (build VM template) -os: Windows Server 2012 R2 - -clone_folder: c:\gopath\src\github.com\minio\minio-go - -# environment variables -environment: - GOPATH: c:\gopath - GO15VENDOREXPERIMENT: 1 - -# scripts that run after cloning repository -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version - - go env - - go get -u golang.org/x/lint/golint - - go get -u github.com/remyoudompheng/go-misc/deadcode - - go get -u github.com/gordonklaus/ineffassign - - go get -u golang.org/x/crypto/argon2 - - go get -t ./... - -# to run your custom scripts instead of automatic MSBuild -build_script: - - go vet ./... - - gofmt -s -l . - - golint -set_exit_status github.com/minio/minio-go... - - deadcode - - ineffassign . - - go test -short -v - - go test -short -race -v - -# to disable automatic tests -test: off - -# to disable deployment -deploy: off diff --git a/bucket-cache.go b/bucket-cache.go index cac7ad792..7d485a6b1 100644 --- a/bucket-cache.go +++ b/bucket-cache.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -18,14 +18,16 @@ package minio import ( + "context" + "net" "net/http" "net/url" "path" "sync" - "github.com/minio/minio-go/pkg/credentials" - "github.com/minio/minio-go/pkg/s3signer" - "github.com/minio/minio-go/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/signer" ) // bucketLocationCache - Provides simple mechanism to hold bucket @@ -71,16 +73,16 @@ func (r *bucketLocationCache) Delete(bucketName string) { // GetBucketLocation - get location for the bucket name from location cache, if not // fetch freshly by making a new request. -func (c Client) GetBucketLocation(bucketName string) (string, error) { +func (c Client) GetBucketLocation(ctx context.Context, bucketName string) (string, error) { if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", err } - return c.getBucketLocation(bucketName) + return c.getBucketLocation(ctx, bucketName) } // getBucketLocation - Get location for the bucketName from location map cache, if not // fetch freshly by making a new request. -func (c Client) getBucketLocation(bucketName string) (string, error) { +func (c Client) getBucketLocation(ctx context.Context, bucketName string) (string, error) { if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", err } @@ -123,8 +125,20 @@ func processBucketLocationResponse(resp *http.Response, bucketName string) (buck // For access denied error, it could be an anonymous // request. Move forward and let the top level callers // succeed if possible based on their policy. 
- if errResp.Code == "AccessDenied" { - return "us-east-1", nil + switch errResp.Code { + case "NotImplemented": + if errResp.Server == "AmazonSnowball" { + return "snowball", nil + } + case "AuthorizationHeaderMalformed": + fallthrough + case "InvalidRegion": + fallthrough + case "AccessDenied": + if errResp.Region == "" { + return "us-east-1", nil + } + return errResp.Region, nil } return "", err } @@ -161,12 +175,30 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro urlValues.Set("location", "") // Set get bucket location always as path style. - targetURL := c.endpointURL - targetURL.Path = path.Join(bucketName, "") + "/" - targetURL.RawQuery = urlValues.Encode() + targetURL := *c.endpointURL + + // as it works in makeTargetURL method from api.go file + if h, p, err := net.SplitHostPort(targetURL.Host); err == nil { + if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" { + targetURL.Host = h + } + } + + isVirtualHost := s3utils.IsVirtualHostSupported(targetURL, bucketName) + + var urlStr string + + //only support Aliyun OSS for virtual hosted path, compatible Amazon & Google Endpoint + if isVirtualHost && s3utils.IsAliyunOSSEndpoint(targetURL) { + urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + targetURL.Host + "/?location" + } else { + targetURL.Path = path.Join(bucketName, "") + "/" + targetURL.RawQuery = urlValues.Encode() + urlStr = targetURL.String() + } // Get a new HTTP request for the method. 
- req, err := http.NewRequest("GET", targetURL.String(), nil) + req, err := http.NewRequest(http.MethodGet, urlStr, nil) if err != nil { return nil, err } @@ -205,7 +237,7 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro if signerType.IsV2() { // Get Bucket Location calls should be always path style isVirtualHost := false - req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) + req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) return req, nil } @@ -216,6 +248,6 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro } req.Header.Set("X-Amz-Content-Sha256", contentSha256) - req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") + req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") return req, nil } diff --git a/bucket-cache_test.go b/bucket-cache_test.go index 507b40862..b95857b36 100644 --- a/bucket-cache_test.go +++ b/bucket-cache_test.go @@ -1,6 +1,6 @@ /* * Copyright - * 2015, 2016, 2017 Minio, Inc. + * 2015, 2016, 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -27,8 +27,8 @@ import ( "reflect" "testing" - "github.com/minio/minio-go/pkg/credentials" - "github.com/minio/minio-go/pkg/s3signer" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/signer" ) // Test validates `newBucketLocationCache`. @@ -67,19 +67,18 @@ func TestBucketLocationCacheOps(t *testing.T) { func TestGetBucketLocationRequest(t *testing.T) { // Generates expected http request for getBucketLocation. // Used for asserting with the actual request generated. - createExpectedRequest := func(c *Client, bucketName string, req *http.Request) (*http.Request, error) { + createExpectedRequest := func(c *Client, bucketName string) (*http.Request, error) { // Set location query. 
urlValues := make(url.Values) urlValues.Set("location", "") // Set get bucket location always as path style. - targetURL := c.endpointURL + targetURL := *c.endpointURL targetURL.Path = path.Join(bucketName, "") + "/" targetURL.RawQuery = urlValues.Encode() // Get a new HTTP request for the method. - var err error - req, err = http.NewRequest("GET", targetURL.String(), nil) + req, err := http.NewRequest(http.MethodGet, targetURL.String(), nil) if err != nil { return nil, err } @@ -120,9 +119,9 @@ func TestGetBucketLocationRequest(t *testing.T) { contentSha256 = unsignedPayload } req.Header.Set("X-Amz-Content-Sha256", contentSha256) - req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") + req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") case signerType.IsV2(): - req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, false) + req = signer.SignV2(*req, accessKeyID, secretAccessKey, false) } return req, nil @@ -156,10 +155,10 @@ func TestGetBucketLocationRequest(t *testing.T) { {"storage.googleapis.com", "my-access-key", "my-secret-key", false}, {"storage.googleapis.com", "", "my-secret-key", false}, - // endpoint custom domain running Minio server. - {"play.minio.io", "", "", false}, - {"play.minio.io", "my-access-key", "my-secret-key", false}, - {"play.minio.io", "my-acess-key", "", false}, + // endpoint custom domain running MinIO server. 
+ {"play.min.io", "", "", false}, + {"play.min.io", "my-access-key", "my-secret-key", false}, + {"play.min.io", "my-acess-key", "", false}, } testCases := []struct { bucketName string @@ -228,8 +227,10 @@ func TestGetBucketLocationRequest(t *testing.T) { client := &Client{} var err error if testCase.info.endPoint != "" { - - client, err = New(testCase.info.endPoint, testCase.info.accessKey, testCase.info.secretKey, testCase.info.enableInsecure) + client, err = New(testCase.info.endPoint, &Options{ + Creds: credentials.NewStaticV4(testCase.info.accessKey, testCase.info.secretKey, ""), + Secure: testCase.info.enableInsecure, + }) if err != nil { t.Fatalf("Test %d: Failed to create new Client: %s", i+1, err.Error()) } @@ -251,8 +252,7 @@ func TestGetBucketLocationRequest(t *testing.T) { // Test passes as expected, but the output values are verified for correctness here. if err == nil && testCase.shouldPass { - expectedReq := &http.Request{} - expectedReq, err = createExpectedRequest(client, testCase.bucketName, expectedReq) + expectedReq, err := createExpectedRequest(client, testCase.bucketName) if err != nil { t.Fatalf("Test %d: Expected request Creation failed", i+1) } diff --git a/code_of_conduct.md b/code_of_conduct.md new file mode 100644 index 000000000..cb232c3c6 --- /dev/null +++ b/code_of_conduct.md @@ -0,0 +1,80 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior, in compliance with the +licensing terms applying to the Project developments. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. However, these actions shall respect the +licensing terms of the Project Developments that will always supersede such +Code of Conduct. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. 
Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at dev@min.io. The project team +will review and investigate all complaints, and will respond in a way that it deems +appropriate to the circumstances. The project team is obligated to maintain +confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +This version includes a clarification to ensure that the code of conduct is in +compliance with the free software licensing terms of the project. + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/constants.go b/constants.go index 737742318..e1da72e79 100644 --- a/constants.go +++ b/constants.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -23,9 +23,9 @@ package minio // a part in a multipart upload may not be uploaded. 
const absMinPartSize = 1024 * 1024 * 5 -// minPartSize - minimum part size 64MiB per object after which +// minPartSize - minimum part size 128MiB per object after which // putObject behaves internally as multipart. -const minPartSize = 1024 * 1024 * 64 +const minPartSize = 1024 * 1024 * 128 // maxPartsCount - maximum number of parts for a single multipart session. const maxPartsCount = 10000 @@ -55,8 +55,32 @@ const ( iso8601DateFormat = "20060102T150405Z" ) -// Storage class header constant. -const amzStorageClass = "X-Amz-Storage-Class" +const ( + // Storage class header. + amzStorageClass = "X-Amz-Storage-Class" + + // Website redirect location header + amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location" + + // Object Tagging headers + amzTaggingHeader = "X-Amz-Tagging" + amzTaggingHeaderDirective = "X-Amz-Tagging-Directive" + + amzVersionID = "X-Amz-Version-Id" + amzTaggingCount = "X-Amz-Tagging-Count" + amzExpiration = "X-Amz-Expiration" + amzReplicationStatus = "X-Amz-Replication-Status" -// Website redirect location header constant -const amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location" + // Object legal hold header + amzLegalHoldHeader = "X-Amz-Object-Lock-Legal-Hold" + + // Object retention header + amzLockMode = "X-Amz-Object-Lock-Mode" + amzLockRetainUntil = "X-Amz-Object-Lock-Retain-Until-Date" + amzBypassGovernance = "X-Amz-Bypass-Governance-Retention" + + // Replication status + amzBucketReplicationStatus = "X-Amz-Replication-Status" + // Minio specific Replication extension + minIOBucketReplicationSourceMTime = "X-Minio-Source-Mtime" +) diff --git a/core.go b/core.go index 4d51363f0..954e1a1bb 100644 --- a/core.go +++ b/core.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,9 @@ package minio import ( "context" "io" - "strings" + "net/http" - "github.com/minio/minio-go/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/encrypt" ) // Core - Inherits Client and adds new methods to expose the low level S3 APIs. @@ -33,9 +33,9 @@ type Core struct { // NewCore - Returns new initialized a Core client, this CoreClient should be // only used under special conditions such as need to access lower primitives // and being able to use them to write your own wrappers. -func NewCore(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Core, error) { +func NewCore(endpoint string, opts *Options) (*Core, error) { var s3Client Core - client, err := NewV4(endpoint, accessKeyID, secretAccessKey, secure) + client, err := New(endpoint, opts) if err != nil { return nil, err } @@ -46,108 +46,87 @@ func NewCore(endpoint string, accessKeyID, secretAccessKey string, secure bool) // ListObjects - List all the objects at a prefix, optionally with marker and delimiter // you can further filter the results. func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) { - return c.listObjectsQuery(bucket, prefix, marker, delimiter, maxKeys) + return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys) } // ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses // continuationToken instead of marker to support iteration over the results. 
-func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int, startAfter string) (ListBucketV2Result, error) { - return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys, startAfter) +func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) { + return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, fetchOwner, false, delimiter, maxkeys) } // CopyObject - copies an object from source object to destination object on server side. -func (c Core) CopyObject(sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string) (ObjectInfo, error) { - return c.copyObjectDo(context.Background(), sourceBucket, sourceObject, destBucket, destObject, metadata) +func (c Core) CopyObject(ctx context.Context, sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string) (ObjectInfo, error) { + return c.copyObjectDo(ctx, sourceBucket, sourceObject, destBucket, destObject, metadata) } // CopyObjectPart - creates a part in a multipart upload by copying (a // part of) an existing object. -func (c Core) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, +func (c Core) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) { - return c.copyObjectPartDo(context.Background(), srcBucket, srcObject, destBucket, destObject, uploadID, + return c.copyObjectPartDo(ctx, srcBucket, srcObject, destBucket, destObject, uploadID, partID, startOffset, length, metadata) } // PutObject - Upload object. Uploads using single PUT call. 
-func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string, sse encrypt.ServerSide) (ObjectInfo, error) { - opts := PutObjectOptions{} - m := make(map[string]string) - for k, v := range metadata { - if strings.ToLower(k) == "content-encoding" { - opts.ContentEncoding = v - } else if strings.ToLower(k) == "content-disposition" { - opts.ContentDisposition = v - } else if strings.ToLower(k) == "content-language" { - opts.ContentLanguage = v - } else if strings.ToLower(k) == "content-type" { - opts.ContentType = v - } else if strings.ToLower(k) == "cache-control" { - opts.CacheControl = v - } else if strings.ToLower(k) == strings.ToLower(amzWebsiteRedirectLocation) { - opts.WebsiteRedirectLocation = v - } else { - m[k] = metadata[k] - } - } - opts.UserMetadata = m - opts.ServerSideEncryption = sse - return c.putObjectDo(context.Background(), bucket, object, data, md5Base64, sha256Hex, size, opts) +func (c Core) PutObject(ctx context.Context, bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, opts PutObjectOptions) (UploadInfo, error) { + return c.putObjectDo(ctx, bucket, object, data, md5Base64, sha256Hex, size, opts) } // NewMultipartUpload - Initiates new multipart upload and returns the new uploadID. -func (c Core) NewMultipartUpload(bucket, object string, opts PutObjectOptions) (uploadID string, err error) { - result, err := c.initiateMultipartUpload(context.Background(), bucket, object, opts) +func (c Core) NewMultipartUpload(ctx context.Context, bucket, object string, opts PutObjectOptions) (uploadID string, err error) { + result, err := c.initiateMultipartUpload(ctx, bucket, object, opts) return result.UploadID, err } // ListMultipartUploads - List incomplete uploads. 
-func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) { - return c.listMultipartUploadsQuery(bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads) +func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) { + return c.listMultipartUploadsQuery(ctx, bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads) } // PutObjectPart - Upload an object part. -func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) { - return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse) +func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) { + return c.uploadPart(ctx, bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse) } // ListObjectParts - List uploaded parts of an incomplete upload.x -func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) { - return c.listObjectPartsQuery(bucket, object, uploadID, partNumberMarker, maxParts) +func (c Core) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) { + return c.listObjectPartsQuery(ctx, bucket, object, uploadID, partNumberMarker, maxParts) } // CompleteMultipartUpload - Concatenate uploaded parts and commit to an object. 
-func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) (string, error) { - res, err := c.completeMultipartUpload(context.Background(), bucket, object, uploadID, completeMultipartUpload{ +func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart) (string, error) { + res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{ Parts: parts, }) return res.ETag, err } // AbortMultipartUpload - Abort an incomplete upload. -func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error { - return c.abortMultipartUpload(context.Background(), bucket, object, uploadID) +func (c Core) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { + return c.abortMultipartUpload(ctx, bucket, object, uploadID) } // GetBucketPolicy - fetches bucket access policy for a given bucket. -func (c Core) GetBucketPolicy(bucket string) (string, error) { - return c.getBucketPolicy(bucket) +func (c Core) GetBucketPolicy(ctx context.Context, bucket string) (string, error) { + return c.getBucketPolicy(ctx, bucket) } // PutBucketPolicy - applies a new bucket access policy for a given bucket. -func (c Core) PutBucketPolicy(bucket, bucketPolicy string) error { - return c.putBucketPolicy(bucket, bucketPolicy) +func (c Core) PutBucketPolicy(ctx context.Context, bucket, bucketPolicy string) error { + return c.putBucketPolicy(ctx, bucket, bucketPolicy) } // GetObject is a lower level API implemented to support reading // partial objects and also downloading objects with special conditions // matching etag, modtime etc. 
-func (c Core) GetObject(bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) { - return c.getObject(context.Background(), bucketName, objectName, opts) +func (c Core) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { + return c.getObject(ctx, bucketName, objectName, opts) } // StatObject is a lower level API implemented to support special // conditions matching etag, modtime on a request. -func (c Core) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { - return c.statObject(context.Background(), bucketName, objectName, opts) +func (c Core) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { + return c.statObject(ctx, bucketName, objectName, opts) } diff --git a/core_test.go b/core_test.go index 367a1cffa..7da03ed2d 100644 --- a/core_test.go +++ b/core_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,13 +19,17 @@ package minio import ( "bytes" - "io" + "context" "log" + "net/http" "os" + "strconv" "testing" "time" "math/rand" + + "github.com/minio/minio-go/v7/pkg/credentials" ) const ( @@ -35,8 +39,6 @@ const ( enableSecurity = "ENABLE_HTTPS" ) -// Minimum part size -const MinPartSize = 1024 * 1024 * 64 const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" const ( letterIdxBits = 6 // 6 bits to represent a letter index @@ -74,10 +76,10 @@ func TestGetObjectCore(t *testing.T) { // Instantiate new minio core client object. 
c, err := NewCore( os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableSecurity)), - ) + &Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableSecurity)), + }) if err != nil { t.Fatal("Error:", err) } @@ -86,13 +88,13 @@ func TestGetObjectCore(t *testing.T) { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, MakeBucketOptions{Region: "us-east-1"}) if err != nil { t.Fatal("Error:", err, bucketName) } @@ -102,15 +104,19 @@ func TestGetObjectCore(t *testing.T) { // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - n, err := c.Client.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), PutObjectOptions{ + _, err = c.Client.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), PutObjectOptions{ ContentType: "binary/octet-stream", }) if err != nil { t.Fatal("Error:", err, bucketName, objectName) } - if n != int64(len(buf)) { - t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) + st, err := c.Client.StatObject(context.Background(), bucketName, objectName, StatObjectOptions{}) + if err != nil { + t.Fatal("Stat error:", err, bucketName, objectName) + } + if st.Size != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size) } offset := int64(2048) @@ -118,16 +124,16 @@ func TestGetObjectCore(t *testing.T) { // read directly buf1 := make([]byte, 512) buf2 := make([]byte, 512) - buf3 := make([]byte, n) + buf3 := make([]byte, st.Size) 
buf4 := make([]byte, 1) opts := GetObjectOptions{} opts.SetRange(offset, offset+int64(len(buf1))-1) - reader, objectInfo, err := c.GetObject(bucketName, objectName, opts) + reader, objectInfo, _, err := c.GetObject(context.Background(), bucketName, objectName, opts) if err != nil { t.Fatal(err) } - m, err := io.ReadFull(reader, buf1) + m, err := readFull(reader, buf1) reader.Close() if err != nil { t.Fatal(err) @@ -142,12 +148,12 @@ func TestGetObjectCore(t *testing.T) { offset += 512 opts.SetRange(offset, offset+int64(len(buf2))-1) - reader, objectInfo, err = c.GetObject(bucketName, objectName, opts) + reader, objectInfo, _, err = c.GetObject(context.Background(), bucketName, objectName, opts) if err != nil { t.Fatal(err) } - m, err = io.ReadFull(reader, buf2) + m, err = readFull(reader, buf2) reader.Close() if err != nil { t.Fatal(err) @@ -161,12 +167,12 @@ func TestGetObjectCore(t *testing.T) { } opts.SetRange(0, int64(len(buf3))) - reader, objectInfo, err = c.GetObject(bucketName, objectName, opts) + reader, objectInfo, _, err = c.GetObject(context.Background(), bucketName, objectName, opts) if err != nil { t.Fatal(err) } - m, err = io.ReadFull(reader, buf3) + m, err = readFull(reader, buf3) if err != nil { reader.Close() t.Fatal(err) @@ -182,7 +188,7 @@ func TestGetObjectCore(t *testing.T) { opts = GetObjectOptions{} opts.SetMatchETag("etag") - _, _, err = c.GetObject(bucketName, objectName, opts) + _, _, _, err = c.GetObject(context.Background(), bucketName, objectName, opts) if err == nil { t.Fatal("Unexpected GetObject should fail with mismatching etags") } @@ -192,12 +198,12 @@ func TestGetObjectCore(t *testing.T) { opts = GetObjectOptions{} opts.SetMatchETagExcept("etag") - reader, objectInfo, err = c.GetObject(bucketName, objectName, opts) + reader, objectInfo, _, err = c.GetObject(context.Background(), bucketName, objectName, opts) if err != nil { t.Fatal(err) } - m, err = io.ReadFull(reader, buf3) + m, err = readFull(reader, buf3) reader.Close() if err 
!= nil { t.Fatal(err) @@ -212,12 +218,12 @@ func TestGetObjectCore(t *testing.T) { opts = GetObjectOptions{} opts.SetRange(0, 0) - reader, objectInfo, err = c.GetObject(bucketName, objectName, opts) + reader, objectInfo, _, err = c.GetObject(context.Background(), bucketName, objectName, opts) if err != nil { t.Fatal(err) } - m, err = io.ReadFull(reader, buf4) + m, err = readFull(reader, buf4) reader.Close() if err != nil { t.Fatal(err) @@ -227,11 +233,28 @@ func TestGetObjectCore(t *testing.T) { t.Fatalf("Error: GetObject read shorter bytes before reaching EOF, want %v, got %v\n", objectInfo.Size, m) } - err = c.RemoveObject(bucketName, objectName) + opts = GetObjectOptions{} + opts.SetRange(offset, offset+int64(len(buf2))-1) + contentLength := len(buf2) + var header http.Header + _, _, header, err = c.GetObject(context.Background(), bucketName, objectName, opts) + if err != nil { + t.Fatal(err) + } + + contentLengthValue, err := strconv.Atoi(header.Get("Content-Length")) + if err != nil { + t.Fatal("Error: ", err) + } + if contentLength != contentLengthValue { + t.Fatalf("Error: Content Length in response header %v, not equal to set content length %v\n", contentLengthValue, contentLength) + } + + err = c.RemoveObject(context.Background(), bucketName, objectName, RemoveObjectOptions{}) if err != nil { t.Fatal("Error: ", err) } - err = c.RemoveBucket(bucketName) + err = c.RemoveBucket(context.Background(), bucketName) if err != nil { t.Fatal("Error:", err) } @@ -250,10 +273,10 @@ func TestGetObjectContentEncoding(t *testing.T) { // Instantiate new minio core client object. 
c, err := NewCore( os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableSecurity)), - ) + &Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableSecurity)), + }) if err != nil { t.Fatal("Error:", err) } @@ -262,13 +285,13 @@ func TestGetObjectContentEncoding(t *testing.T) { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, MakeBucketOptions{Region: "us-east-1"}) if err != nil { t.Fatal("Error:", err, bucketName) } @@ -278,24 +301,20 @@ func TestGetObjectContentEncoding(t *testing.T) { // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - n, err := c.Client.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), PutObjectOptions{ + _, err = c.Client.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), PutObjectOptions{ ContentEncoding: "gzip", }) if err != nil { t.Fatal("Error:", err, bucketName, objectName) } - if n != int64(len(buf)) { - t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) - } - - rwc, objInfo, err := c.GetObject(bucketName, objectName, GetObjectOptions{}) + rwc, objInfo, _, err := c.GetObject(context.Background(), bucketName, objectName, GetObjectOptions{}) if err != nil { t.Fatalf("Error: %v", err) } rwc.Close() - if objInfo.Size <= 0 { - t.Fatalf("Unexpected size of the object %v, expected %v", objInfo.Size, n) + if objInfo.Size != int64(len(buf)) { + t.Fatalf("Unexpected size of the object %v, expected %v", objInfo.Size, len(buf)) } value, ok := 
objInfo.Metadata["Content-Encoding"] if !ok { @@ -305,11 +324,11 @@ func TestGetObjectContentEncoding(t *testing.T) { t.Fatalf("Unexpected content-encoding found, want gzip, got %v", value) } - err = c.RemoveObject(bucketName, objectName) + err = c.RemoveObject(context.Background(), bucketName, objectName, RemoveObjectOptions{}) if err != nil { t.Fatal("Error: ", err) } - err = c.RemoveBucket(bucketName) + err = c.RemoveBucket(context.Background(), bucketName) if err != nil { t.Fatal("Error:", err) } @@ -327,10 +346,10 @@ func TestGetBucketPolicy(t *testing.T) { // Instantiate new minio client object. c, err := NewCore( os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableSecurity)), - ) + &Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableSecurity)), + }) if err != nil { t.Fatal("Error:", err) } @@ -339,20 +358,20 @@ func TestGetBucketPolicy(t *testing.T) { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, MakeBucketOptions{Region: "us-east-1"}) if err != nil { t.Fatal("Error:", err, bucketName) } // Verify if bucket exits and you have access. var exists bool - exists, err = c.BucketExists(bucketName) + exists, err = c.BucketExists(context.Background(), bucketName) if err != nil { t.Fatal("Error:", err, bucketName) } @@ -361,7 +380,7 @@ func TestGetBucketPolicy(t *testing.T) { } // Asserting the default bucket policy. 
- bucketPolicy, err := c.GetBucketPolicy(bucketName) + bucketPolicy, err := c.GetBucketPolicy(context.Background(), bucketName) if err != nil { errResp := ToErrorResponse(err) if errResp.Code != "NoSuchBucketPolicy" { @@ -372,7 +391,7 @@ func TestGetBucketPolicy(t *testing.T) { t.Errorf("Bucket policy expected %#v, got %#v", "", bucketPolicy) } - err = c.RemoveBucket(bucketName) + err = c.RemoveBucket(context.Background(), bucketName) if err != nil { t.Fatal("Error:", err) } @@ -390,10 +409,10 @@ func TestCoreCopyObject(t *testing.T) { // Instantiate new minio client object. c, err := NewCore( os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableSecurity)), - ) + &Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableSecurity)), + }) if err != nil { t.Fatal("Error:", err) } @@ -402,13 +421,13 @@ func TestCoreCopyObject(t *testing.T) { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, MakeBucketOptions{Region: "us-east-1"}) if err != nil { t.Fatal("Error:", err, bucketName) } @@ -417,38 +436,47 @@ func TestCoreCopyObject(t *testing.T) { // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, nil) + + putopts := PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", putopts) if err != nil { t.Fatal("Error:", err, bucketName, objectName) } - if objInfo.Size != int64(len(buf)) { - t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size) + st, err := c.StatObject(context.Background(), bucketName, objectName, StatObjectOptions{}) + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + if st.Size != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size) } destBucketName := bucketName destObjectName := objectName + "-dest" - cobjInfo, err := c.CopyObject(bucketName, objectName, destBucketName, destObjectName, map[string]string{ + cuploadInfo, err := c.CopyObject(context.Background(), bucketName, objectName, destBucketName, destObjectName, map[string]string{ "X-Amz-Metadata-Directive": "REPLACE", "Content-Type": "application/javascript", }) if err != nil { t.Fatal("Error:", err, bucketName, objectName, destBucketName, destObjectName) } - if cobjInfo.ETag != objInfo.ETag { - t.Fatalf("Error: expected etag to be same as source object %s, but found different etag :%s", objInfo.ETag, cobjInfo.ETag) + if cuploadInfo.ETag != uploadInfo.ETag { + t.Fatalf("Error: expected etag to be same as 
source object %s, but found different etag %s", uploadInfo.ETag, cuploadInfo.ETag) } // Attempt to read from destBucketName and object name. - r, err := c.Client.GetObject(destBucketName, destObjectName, GetObjectOptions{}) + r, err := c.Client.GetObject(context.Background(), destBucketName, destObjectName, GetObjectOptions{}) if err != nil { t.Fatal("Error:", err, bucketName, objectName) } - st, err := r.Stat() + st, err = r.Stat() if err != nil { t.Fatal("Error:", err, bucketName, objectName) } @@ -462,8 +490,8 @@ func TestCoreCopyObject(t *testing.T) { t.Fatalf("Error: Content types don't match, expected: application/javascript, found: %+v\n", st.ContentType) } - if st.ETag != objInfo.ETag { - t.Fatalf("Error: expected etag to be same as source object %s, but found different etag :%s", objInfo.ETag, st.ETag) + if st.ETag != uploadInfo.ETag { + t.Fatalf("Error: expected etag to be same as source object %s, but found different etag :%s", uploadInfo.ETag, st.ETag) } if err := r.Close(); err != nil { @@ -474,17 +502,17 @@ func TestCoreCopyObject(t *testing.T) { t.Fatal("Error: object is already closed, should return error") } - err = c.RemoveObject(bucketName, objectName) + err = c.RemoveObject(context.Background(), bucketName, objectName, RemoveObjectOptions{}) if err != nil { t.Fatal("Error: ", err) } - err = c.RemoveObject(destBucketName, destObjectName) + err = c.RemoveObject(context.Background(), destBucketName, destObjectName, RemoveObjectOptions{}) if err != nil { t.Fatal("Error: ", err) } - err = c.RemoveBucket(bucketName) + err = c.RemoveBucket(context.Background(), bucketName) if err != nil { t.Fatal("Error:", err) } @@ -504,10 +532,10 @@ func TestCoreCopyObjectPart(t *testing.T) { // Instantiate new minio client object. 
c, err := NewCore( os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableSecurity)), - ) + &Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableSecurity)), + }) if err != nil { t.Fatal("Error:", err) } @@ -516,37 +544,45 @@ func TestCoreCopyObjectPart(t *testing.T) { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, MakeBucketOptions{Region: "us-east-1"}) if err != nil { t.Fatal("Error:", err, bucketName) } // Make a buffer with 5MB of data buf := bytes.Repeat([]byte("abcde"), 1024*1024) - + metadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + putopts := PutObjectOptions{ + UserMetadata: metadata, + } // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, nil) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", putopts) if err != nil { t.Fatal("Error:", err, bucketName, objectName) } - if objInfo.Size != int64(len(buf)) { - t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size) + st, err := c.StatObject(context.Background(), bucketName, objectName, StatObjectOptions{}) + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + if st.Size != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size) } destBucketName 
:= bucketName destObjectName := objectName + "-dest" - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, PutObjectOptions{}) + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, PutObjectOptions{}) if err != nil { t.Fatal("Error:", err, bucketName, objectName) } @@ -556,31 +592,31 @@ func TestCoreCopyObjectPart(t *testing.T) { // `objectName`. // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, nil) + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, nil) if err != nil { t.Fatal("Error:", err, destBucketName, destObjectName) } // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, nil) + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, nil) if err != nil { t.Fatal("Error:", err, destBucketName, destObjectName) } // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, nil) + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, nil) if err != nil { t.Fatal("Error:", err, destBucketName, destObjectName) } // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []CompletePart{fstPart, sndPart, lstPart}) if err != nil { t.Fatal("Error:", err, destBucketName, destObjectName) } // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, StatObjectOptions{}) + objInfo, err := 
c.StatObject(context.Background(), destBucketName, destObjectName, StatObjectOptions{}) if err != nil { t.Fatal("Error:", err, destBucketName, destObjectName) } @@ -592,12 +628,12 @@ func TestCoreCopyObjectPart(t *testing.T) { // Now we read the data back getOpts := GetObjectOptions{} getOpts.SetRange(0, 5*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) if err != nil { t.Fatal("Error:", err, destBucketName, destObjectName) } getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { t.Fatal("Error:", err, destBucketName, destObjectName) } @@ -606,12 +642,12 @@ func TestCoreCopyObjectPart(t *testing.T) { } getOpts.SetRange(5*1024*1024, 0) - r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) if err != nil { t.Fatal("Error:", err, destBucketName, destObjectName) } getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { t.Fatal("Error:", err, destBucketName, destObjectName) } @@ -622,15 +658,15 @@ func TestCoreCopyObjectPart(t *testing.T) { t.Fatal("Got unexpected data in last byte of copied object!") } - if err := c.RemoveObject(destBucketName, destObjectName); err != nil { + if err := c.RemoveObject(context.Background(), destBucketName, destObjectName, RemoveObjectOptions{}); err != nil { t.Fatal("Error: ", err) } - if err := c.RemoveObject(bucketName, objectName); err != nil { + if err := c.RemoveObject(context.Background(), bucketName, objectName, RemoveObjectOptions{}); err != nil { t.Fatal("Error: ", err) } - if err := c.RemoveBucket(bucketName); err != nil { + if err := c.RemoveBucket(context.Background(), bucketName); err != nil { t.Fatal("Error: ", err) } @@ -649,10 +685,10 @@ func TestCorePutObject(t *testing.T) { 
// Instantiate new minio client object. c, err := NewCore( os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableSecurity)), - ) + &Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableSecurity)), + }) if err != nil { t.Fatal("Error:", err) } @@ -661,13 +697,13 @@ func TestCorePutObject(t *testing.T) { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, MakeBucketOptions{Region: "us-east-1"}) if err != nil { t.Fatal("Error:", err, bucketName) } @@ -680,23 +716,21 @@ func TestCorePutObject(t *testing.T) { objectContentType := "binary/octet-stream" metadata := make(map[string]string) metadata["Content-Type"] = objectContentType - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "1B2M2Y8AsgTpgAmY7PhCfg==", "", metadata, nil) + putopts := PutObjectOptions{ + UserMetadata: metadata, + } + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "1B2M2Y8AsgTpgAmY7PhCfg==", "", putopts) if err == nil { t.Fatal("Error expected: error, got: nil(success)") } - objInfo, err = c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", metadata, nil) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", putopts) if err != nil { t.Fatal("Error:", err, bucketName, objectName) } - if objInfo.Size != int64(len(buf)) { - t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size) - } - // Read the data back - r, 
err := c.Client.GetObject(bucketName, objectName, GetObjectOptions{}) + r, err := c.Client.GetObject(context.Background(), bucketName, objectName, GetObjectOptions{}) if err != nil { t.Fatal("Error:", err, bucketName, objectName) } @@ -723,12 +757,12 @@ func TestCorePutObject(t *testing.T) { t.Fatal("Error: object is already closed, should return error") } - err = c.RemoveObject(bucketName, objectName) + err = c.RemoveObject(context.Background(), bucketName, objectName, RemoveObjectOptions{}) if err != nil { t.Fatal("Error: ", err) } - err = c.RemoveBucket(bucketName) + err = c.RemoveBucket(context.Background(), bucketName) if err != nil { t.Fatal("Error:", err) } @@ -741,18 +775,19 @@ func TestCoreGetObjectMetadata(t *testing.T) { core, err := NewCore( os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableSecurity))) + &Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableSecurity)), + }) if err != nil { - log.Fatalln(err) + t.Fatal(err) } // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") // Make a new bucket. 
- err = core.MakeBucket(bucketName, "us-east-1") + err = core.MakeBucket(context.Background(), bucketName, MakeBucketOptions{Region: "us-east-1"}) if err != nil { t.Fatal("Error:", err, bucketName) } @@ -760,14 +795,17 @@ func TestCoreGetObjectMetadata(t *testing.T) { metadata := map[string]string{ "X-Amz-Meta-Key-1": "Val-1", } + putopts := PutObjectOptions{ + UserMetadata: metadata, + } - _, err = core.PutObject(bucketName, "my-objectname", - bytes.NewReader([]byte("hello")), 5, "", "", metadata, nil) + _, err = core.PutObject(context.Background(), bucketName, "my-objectname", + bytes.NewReader([]byte("hello")), 5, "", "", putopts) if err != nil { log.Fatalln(err) } - reader, objInfo, err := core.GetObject(bucketName, "my-objectname", GetObjectOptions{}) + reader, objInfo, _, err := core.GetObject(context.Background(), bucketName, "my-objectname", GetObjectOptions{}) if err != nil { log.Fatalln(err) } @@ -777,11 +815,11 @@ func TestCoreGetObjectMetadata(t *testing.T) { log.Fatalln("Expected metadata to be available but wasn't") } - err = core.RemoveObject(bucketName, "my-objectname") + err = core.RemoveObject(context.Background(), bucketName, "my-objectname", RemoveObjectOptions{}) if err != nil { t.Fatal("Error: ", err) } - err = core.RemoveBucket(bucketName) + err = core.RemoveBucket(context.Background(), bucketName) if err != nil { t.Fatal("Error:", err) } diff --git a/docs/API.md b/docs/API.md index 577821673..77ae8b0e2 100644 --- a/docs/API.md +++ b/docs/API.md @@ -1,28 +1,35 @@ -# Minio Go Client API Reference [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) +# MinIO Go Client API Reference [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) -## Initialize Minio Client object. +## Initialize MinIO Client object. 
-## Minio +## MinIO ```go package main import ( - "fmt" + "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { - // Use a secure connection. - ssl := true + endpoint := "play.min.io" + accessKeyID := "Q3AM3UQ867SPQQA43P2F" + secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" + useSSL := true - // Initialize minio client object. - minioClient, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ssl) - if err != nil { - fmt.Println(err) - return - } + // Initialize minio client object. + minioClient, err := minio.New(endpoint, &minio.Options{ + Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), + Secure: useSSL, + }) + if err != nil { + log.Fatalln(err) + } + + log.Printf("%#v\n", minioClient) // minioClient is now setup } ``` @@ -34,15 +41,16 @@ package main import ( "fmt" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { - // Use a secure connection. - ssl := true - // Initialize minio client object. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ssl) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { fmt.Println(err) return @@ -50,40 +58,44 @@ func main() { } ``` -| Bucket operations | Object operations | Encrypted Object operations | Presigned operations | Bucket Policy/Notification Operations | Client custom settings | -| :--- | :--- | :--- | :--- | :--- | :--- | -| [`MakeBucket`](#MakeBucket) | [`GetObject`](#GetObject) | [`GetObject`](#GetObject) | [`PresignedGetObject`](#PresignedGetObject) | [`SetBucketPolicy`](#SetBucketPolicy) | [`SetAppInfo`](#SetAppInfo) | -| [`ListBuckets`](#ListBuckets) | [`PutObject`](#PutObject) | [`PutObject`](#PutObject) | [`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) | -| [`BucketExists`](#BucketExists) | [`CopyObject`](#CopyObject) | [`CopyObject`](#CopyObject) | [`PresignedPostPolicy`](#PresignedPostPolicy) | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOn`](#TraceOn) | -| [`RemoveBucket`](#RemoveBucket) | [`StatObject`](#StatObject) | [`StatObject`](#StatObject) | | [`GetBucketNotification`](#GetBucketNotification) | [`TraceOff`](#TraceOff) | -| [`ListObjects`](#ListObjects) | [`RemoveObject`](#RemoveObject) | | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) | -| [`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | | | [`ListenBucketNotification`](#ListenBucketNotification) | | -| [`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | | [`SetBucketLifecycle`](#SetBucketLifecycle) | | -| | [`FPutObject`](#FPutObject) | [`FPutObject`](#FPutObject) | | [`GetBucketLifecycle`](#GetBucketLifecycle) | | -| | 
[`FGetObject`](#FGetObject) | [`FGetObject`](#FGetObject) | | | | -| | [`ComposeObject`](#ComposeObject) | [`ComposeObject`](#ComposeObject) | | | | -| | [`NewSourceInfo`](#NewSourceInfo) | [`NewSourceInfo`](#NewSourceInfo) | | | | -| | [`NewDestinationInfo`](#NewDestinationInfo) | [`NewDestinationInfo`](#NewDestinationInfo) | | | | -| | [`PutObjectWithContext`](#PutObjectWithContext) | [`PutObjectWithContext`](#PutObjectWithContext) | | | -| | [`GetObjectWithContext`](#GetObjectWithContext) | [`GetObjectWithContext`](#GetObjectWithContext) | | | -| | [`FPutObjectWithContext`](#FPutObjectWithContext) | [`FPutObjectWithContext`](#FPutObjectWithContext) | | | -| | [`FGetObjectWithContext`](#FGetObjectWithContext) | [`FGetObjectWithContext`](#FGetObjectWithContext) | | | -| | [`RemoveObjectsWithContext`](#RemoveObjectsWithContext) | | | | -| | [`SelectObjectContent`](#SelectObjectContent) | | +| Bucket operations | Object operations | Encrypted Object operations | Presigned operations | Bucket Policy/Notification Operations | Client custom settings | +| :--- | :--- | :--- | :--- | :--- | :--- | +| [`MakeBucket`](#MakeBucket) | [`GetObject`](#GetObject) | [`GetObject`](#GetObject) | [`PresignedGetObject`](#PresignedGetObject) | [`SetBucketPolicy`](#SetBucketPolicy) | [`SetAppInfo`](#SetAppInfo) | +| [`PutObject`](#PutObject) | [`PutObject`](#PutObject) | [`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) | | +| [`ListBuckets`](#ListBuckets) | [`CopyObject`](#CopyObject) | [`CopyObject`](#CopyObject) | [`PresignedPostPolicy`](#PresignedPostPolicy) | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOn`](#TraceOn) | +| [`BucketExists`](#BucketExists) | [`StatObject`](#StatObject) | [`StatObject`](#StatObject) | | [`GetBucketNotification`](#GetBucketNotification) | [`TraceOff`](#TraceOff) | +| [`RemoveBucket`](#RemoveBucket) | [`RemoveObject`](#RemoveObject) | 
[`FPutObject`](#FPutObject) | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) | +| [`ListObjects`](#ListObjects) | [`RemoveObjects`](#RemoveObjects) | [`FGetObject`](#FGetObject) | | [`ListenBucketNotification`](#ListenBucketNotification) | | +| | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | [`ComposeObject`](#ComposeObject) | | [`SetBucketLifecycle`](#SetBucketLifecycle) | | +| [`ListIncompleteUploads`](#ListIncompleteUploads) | [`FPutObject`](#FPutObject) | | | [`GetBucketLifecycle`](#GetBucketLifecycle) | | +| [`SetBucketTagging`](#SetBucketTagging) | [`FGetObject`](#FGetObject) | | | [`SetObjectLockConfig`](#SetObjectLockConfig) | | +| [`GetBucketTagging`](#GetBucketTagging) | [`ComposeObject`](#ComposeObject) | | | [`GetObjectLockConfig`](#GetObjectLockConfig) | | +| [`RemoveBucketTagging`](#RemoveBucketTagging) | | | | [`EnableVersioning`](#EnableVersioning) | | +| [`SetBucketReplication`](#SetBucketReplication) | | | | [`DisableVersioning`](#DisableVersioning) | | +| [`GetBucketReplication`](#GetBucketReplication) | [`PutObjectRetention`](#PutObjectRetention) | | | [`GetBucketEncryption`](#GetBucketEncryption) | | +| [`RemoveBucketReplication`](#RemoveBucketReplication) | [`GetObjectRetention`](#GetObjectRetention) | | | [`RemoveBucketEncryption`](#RemoveBucketEncryption) | | +| | [`PutObjectLegalHold`](#PutObjectLegalHold) | | | | | +| | [`GetObjectLegalHold`](#GetObjectLegalHold) | | | | | +| | [`SelectObjectContent`](#SelectObjectContent) | | | | | +| | [`PutObjectTagging`](#PutObjectTagging) | | | | | +| | [`GetObjectTagging`](#GetObjectTagging) | | | | | +| | [`RemoveObjectTagging`](#RemoveObjectTagging) | | | | | +| | | | | | | + ## 1. Constructor - + ### New(endpoint, accessKeyID, secretAccessKey string, ssl bool) (*Client, error) Initializes a new client object. 
__Parameters__ -|Param |Type |Description | -|:---|:---| :---| -|`endpoint` | _string_ |S3 compatible object storage endpoint | -|`accessKeyID` |_string_ |Access key for the object storage | -|`secretAccessKey` | _string_ |Secret key for the object storage | -|`ssl` | _bool_ | If 'true' API requests will be secure (HTTPS), and insecure (HTTP) otherwise | +| Param | Type | Description | +|:------------------|:---------|:-----------------------------------------------------------------------------| +| `endpoint` | _string_ | S3 compatible object storage endpoint | +| `accessKeyID` | _string_ | Access key for the object storage | +| `secretAccessKey` | _string_ | Secret key for the object storage | +| `ssl` | _bool_ | If 'true' API requests will be secure (HTTPS), and insecure (HTTP) otherwise | ### NewWithRegion(endpoint, accessKeyID, secretAccessKey string, ssl bool, region string) (*Client, error) Initializes minio client, with region configured. Unlike New(), NewWithRegion avoids bucket-location lookup operations and it is slightly faster. Use this function when your application deals with a single region. @@ -93,50 +105,66 @@ Initializes minio client with options configured. 
__Parameters__ -|Param |Type |Description | -|:---|:---| :---| -|`endpoint` | _string_ |S3 compatible object storage endpoint | -|`opts` |_minio.Options_ | Options for constructing a new client| +| Param | Type | Description | +|:-----------|:----------------|:--------------------------------------| +| `endpoint` | _string_ | S3 compatible object storage endpoint | +| `opts` | _minio.Options_ | Options for constructing a new client | __minio.Options__ -|Field | Type | Description | -|:--- |:--- | :--- | -| `opts.Creds` | _*credentials.Credentials_ | Access Credentials| -| `opts.Secure` | _bool_ | If 'true' API requests will be secure (HTTPS), and insecure (HTTP) otherwise | -| `opts.Region` | _string_ | region | -| `opts.BucketLookup` | _BucketLookupType_ | Bucket lookup type can be one of the following values | -| | | _minio.BucketLookupDNS_ | -| | | _minio.BucketLookupPath_ | -| | | _minio.BucketLookupAuto_ | +| Field | Type | Description | +|:--------------------|:---------------------------|:-----------------------------------------------------------------------------| +| `opts.Creds` | _*credentials.Credentials_ | Access Credentials | +| `opts.Secure` | _bool_ | If 'true' API requests will be secure (HTTPS), and insecure (HTTP) otherwise | +| `opts.Region` | _string_ | region | +| `opts.BucketLookup` | _BucketLookupType_ | Bucket lookup type can be one of the following values | +| | | _minio.BucketLookupDNS_ | +| | | _minio.BucketLookupPath_ | +| | | _minio.BucketLookupAuto_ | ## 2. Bucket operations -### MakeBucket(bucketName, location string) error +### MakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) Creates a new bucket. __Parameters__ -| Param | Type | Description | -|---|---|---| -|`bucketName` | _string_ | Name of the bucket | -| `location` | _string_ | Region where the bucket is to be created. Default value is us-east-1. Other valid values are listed below. 
Note: When used with minio server, use the region specified in its config file (defaults to us-east-1).| -| | |us-east-1 | -| | |us-west-1 | -| | |us-west-2 | -| | |eu-west-1 | -| | | eu-central-1| -| | | ap-southeast-1| -| | | ap-northeast-1| -| | | ap-southeast-2| -| | | sa-east-1| +| Param | Type | Description | +|--------------|---------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `ctx` | _context.Context_ | Custom context for timeout/cancellation of the call | +| `bucketName` | _string_ | Name of the bucket | +| `opts` | _minio.MakeBucketOptions_ | Bucket options such as `Region` where the bucket is to be created. Default value is us-east-1. Other valid values are listed below. Note: When used with minio server, use the region specified in its config file (defaults to us-east-1). | +| | | us-east-1 | +| | | us-east-2 | +| | | us-west-1 | +| | | us-west-2 | +| | | ca-central-1 | +| | | eu-west-1 | +| | | eu-west-2 | +| | | eu-west-3 | +| | | eu-central-1 | +| | | eu-north-1 | +| | | ap-east-1 | +| | | ap-south-1 | +| | | ap-southeast-1 | +| | | ap-southeast-2 | +| | | ap-northeast-1 | +| | | ap-northeast-2 | +| | | ap-northeast-3 | +| | | me-south-1 | +| | | sa-east-1 | +| | | us-gov-west-1 | +| | | us-gov-east-1 | +| | | cn-north-1 | +| | | cn-northwest-1 | __Example__ ```go -err = minioClient.MakeBucket("mybucket", "us-east-1") +// Create a bucket at region 'us-east-1' with object locking enabled. +err = minioClient.MakeBucket(context.Background(), "mybucket", minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) if err != nil { fmt.Println(err) return @@ -145,11 +173,12 @@ fmt.Println("Successfully created mybucket.") ``` -### ListBuckets() ([]BucketInfo, error) +### ListBuckets(ctx context.Context) ([]BucketInfo, error) Lists all buckets. 
| Param | Type | Description | |---|---|---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketList` | _[]minio.BucketInfo_ | Lists of all buckets | @@ -165,7 +194,7 @@ __Example__ ```go -buckets, err := minioClient.ListBuckets() +buckets, err := minioClient.ListBuckets(context.Background()) if err != nil { fmt.Println(err) return @@ -176,7 +205,7 @@ for _, bucket := range buckets { ``` -### BucketExists(bucketName string) (found bool, err error) +### BucketExists(ctx context.Context, bucketName string) (found bool, err error) Checks if a bucket exists. __Parameters__ @@ -184,6 +213,7 @@ __Parameters__ |Param |Type |Description | |:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketName` | _string_ |Name of the bucket | @@ -199,7 +229,7 @@ __Example__ ```go -found, err := minioClient.BucketExists("mybucket") +found, err := minioClient.BucketExists(context.Background(), "mybucket") if err != nil { fmt.Println(err) return @@ -210,7 +240,7 @@ if found { ``` -### RemoveBucket(bucketName string) error +### RemoveBucket(ctx context.Context, bucketName string) error Removes a bucket, bucket should be empty to be successfully removed. __Parameters__ @@ -218,13 +248,14 @@ __Parameters__ |Param |Type |Description | |:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketName` | _string_ |Name of the bucket | __Example__ ```go -err = minioClient.RemoveBucket("mybucket") +err = minioClient.RemoveBucket(context.Background(), "mybucket") if err != nil { fmt.Println(err) return @@ -232,18 +263,17 @@ if err != nil { ``` -### ListObjects(bucketName, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo +### ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo Lists objects in a bucket. 
__Parameters__ -|Param |Type |Description | -|:---|:---| :---| -|`bucketName` | _string_ |Name of the bucket | -|`objectPrefix` |_string_ | Prefix of objects to be listed | -|`recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. | -|`doneCh` | _chan struct{}_ | A message on this channel ends the ListObjects iterator. | +| Param | Type | Description | +|:-------------|:---------------------------|:----------------------------------------------------| +| `ctx` | _context.Context_ | Custom context for timeout/cancellation of the call | +| `bucketName` | _string_ | Name of the bucket | +| `opts` | _minio.ListObjectsOptions_ | Options to list objects | __Return Value__ @@ -263,14 +293,14 @@ __minio.ObjectInfo__ ```go -// Create a done channel to control 'ListObjects' go routine. -doneCh := make(chan struct{}) +ctx, cancel := context.WithCancel(context.Background()) -// Indicate to our routine to exit cleanly upon return. -defer close(doneCh) +defer cancel() -isRecursive := true -objectCh := minioClient.ListObjects("mybucket", "myprefix", isRecursive, doneCh) +objectCh := minioClient.ListObjects(ctx, "mybucket", minio.ListObjectsOptions{ + Prefix: "myprefix", + Recursive: true, +}) for object := range objectCh { if object.Err != nil { fmt.Println(object.Err) @@ -281,60 +311,20 @@ for object := range objectCh { ``` 
| - - -__Return Value__ - -|Param |Type |Description | -|:---|:---| :---| -|`objectInfo` | _chan minio.ObjectInfo_ |Read channel for all the objects in the bucket, the object is of the format listed below: | - - -```go -// Create a done channel to control 'ListObjectsV2' go routine. -doneCh := make(chan struct{}) - -// Indicate to our routine to exit cleanly upon return. -defer close(doneCh) - -isRecursive := true -objectCh := minioClient.ListObjectsV2("mybucket", "myprefix", isRecursive, doneCh) -for object := range objectCh { - if object.Err != nil { - fmt.Println(object.Err) - return - } - fmt.Println(object) -} -``` - -### ListIncompleteUploads(bucketName, prefix string, recursive bool, doneCh chan struct{}) <- chan ObjectMultipartInfo +### ListIncompleteUploads(ctx context.Context, bucketName, prefix string, recursive bool) <- chan ObjectMultipartInfo Lists partially uploaded objects in a bucket. __Parameters__ -|Param |Type |Description | -|:---|:---| :---| -|`bucketName` | _string_ |Name of the bucket | -| `prefix` |_string_ | Prefix of objects that are partially uploaded | -| `recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. | -|`doneCh` | _chan struct{}_ | A message on this channel ends the ListenIncompleteUploads iterator. | +| Param | Type | Description | +|:-------------|:------------------|:---------------------------------------------------------------------------------------------------------| +| `ctx` | _context.Context_ | Custom context for timeout/cancellation of the call | +| `bucketName` | _string_ | Name of the bucket | +| `prefix` | _string_ | Prefix of objects that are partially uploaded | +| `recursive` | _bool_ | `true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. | __Return Value__ @@ -355,14 +345,8 @@ __Example__ ```go -// Create a done channel to control 'ListObjects' go routine. 
-doneCh := make(chan struct{}) - -// Indicate to our routine to exit cleanly upon return. -defer close(doneCh) - isRecursive := true // Recursively list everything at 'myprefix' -multiPartObjectCh := minioClient.ListIncompleteUploads("mybucket", "myprefix", isRecursive, doneCh) +multiPartObjectCh := minioClient.ListIncompleteUploads(context.Background(), "mybucket", "myprefix", isRecursive) for multiPartObject := range multiPartObjectCh { if multiPartObject.Err != nil { fmt.Println(multiPartObject.Err) @@ -372,97 +356,105 @@ for multiPartObject := range multiPartObjectCh { } ``` -## 3. Object operations - - -### GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error) -Returns a stream of the object data. Most of the common errors occur when reading the stream. + +### SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error +Sets tags to a bucket. __Parameters__ +| Param | Type | Description | +|:-------------|:------------------|:----------------------------------------------------| +| `ctx` | _context.Context_ | Custom context for timeout/cancellation of the call | +| `bucketName` | _string_ | Name of the bucket | +| `tags` | _*tags.Tags_ | Bucket tags | +__Example__ +```go +// Create tags from a map. +tags, err := tags.NewTags(map[string]string{ + "Tag1": "Value1", + "Tag2": "Value2", +}, false) +if err != nil { + log.Fatalln(err) +} -|Param |Type |Description | -|:---|:---| :---| -|`bucketName` | _string_ |Name of the bucket | -|`objectName` | _string_ |Name of the object | -|`opts` | _minio.GetObjectOptions_ | Options for GET requests specifying additional options like encryption, If-Match | +err = minioClient.SetBucketTagging(context.Background(), "my-bucketname", tags) +if err != nil { + log.Fatalln(err) +} +``` + +### GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) +Gets tags of a bucket. 
-__minio.GetObjectOptions__ -|Field | Type | Description | -|:---|:---|:---| -| `opts.ServerSideEncryption` | _encrypt.ServerSide_ | Interface provided by `encrypt` package to specify server-side-encryption. (For more information see https://godoc.org/github.com/minio/minio-go) | +__Parameters__ +| Param | Type | Description | +|:-------------|:-------------|:-------------------| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +| `bucketName` | _string_ | Name of the bucket | __Return Value__ - -|Param |Type |Description | -|:---|:---| :---| -|`object` | _*minio.Object_ |_minio.Object_ represents object reader. It implements io.Reader, io.Seeker, io.ReaderAt and io.Closer interfaces. | - +| Param | Type | Description | +|:-------|:-------------|:------------| +| `tags` | _*tags.Tags_ | Bucket tags | __Example__ - - ```go -object, err := minioClient.GetObject("mybucket", "myobject", minio.GetObjectOptions{}) -if err != nil { - fmt.Println(err) - return -} -localFile, err := os.Create("/tmp/local-file.jpg") +tags, err := minioClient.GetBucketTagging(context.Background(), "my-bucketname") if err != nil { - fmt.Println(err) - return -} -if _, err = io.Copy(localFile, object); err != nil { - fmt.Println(err) - return + log.Fatalln(err) } -``` - -### FGetObject(bucketName, objectName, filePath string, opts GetObjectOptions) error -Downloads and saves the object as a file in the local filesystem. - -__Parameters__ +fmt.Printf("Fetched Object Tags: %v\n", tags) +``` + +### RemoveBucketTagging(ctx context.Context, bucketName string) error +Removes all tags on a bucket. 
-|Param |Type |Description | -|:---|:---| :---| -|`bucketName` | _string_ |Name of the bucket | -|`objectName` | _string_ |Name of the object | -|`filePath` | _string_ |Path to download object to | -|`opts` | _minio.GetObjectOptions_ | Options for GET requests specifying additional options like encryption, If-Match | +__Parameters__ +| Param | Type | Description | +|:-------------|:-------------|:-------------------| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +| `bucketName` | _string_ | Name of the bucket | __Example__ - - ```go -err = minioClient.FGetObject("mybucket", "myobject", "/tmp/myobject", minio.GetObjectOptions{}) +err := minioClient.RemoveBucketTagging(context.Background(), "my-bucketname") if err != nil { - fmt.Println(err) - return + log.Fatalln(err) } ``` - -### GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) -Identical to GetObject operation, but accepts a context for request cancellation. + +## 3. Object operations + + +### GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) +Returns a stream of the object data. Most of the common errors occur when reading the stream. + __Parameters__ |Param |Type |Description | |:---|:---| :---| -|`ctx` | _context.Context_ |Request context | +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketName` | _string_ |Name of the bucket | |`objectName` | _string_ |Name of the object | |`opts` | _minio.GetObjectOptions_ | Options for GET requests specifying additional options like encryption, If-Match | +__minio.GetObjectOptions__ + +|Field | Type | Description | +|:---|:---|:---| +| `opts.ServerSideEncryption` | _encrypt.ServerSide_ | Interface provided by `encrypt` package to specify server-side-encryption. 
(For more information see https://godoc.org/github.com/minio/minio-go/v7) | + __Return Value__ @@ -475,37 +467,32 @@ __Example__ ```go -ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second) -defer cancel() - -object, err := minioClient.GetObjectWithContext(ctx, "mybucket", "myobject", minio.GetObjectOptions{}) +object, err := minioClient.GetObject(context.Background(), "mybucket", "myobject", minio.GetObjectOptions{}) if err != nil { fmt.Println(err) return } - localFile, err := os.Create("/tmp/local-file.jpg") if err != nil { fmt.Println(err) return } - if _, err = io.Copy(localFile, object); err != nil { fmt.Println(err) return } ``` - -### FGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error -Identical to FGetObject operation, but allows request cancellation. + +### FGetObject(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error +Downloads and saves the object as a file in the local filesystem. 
__Parameters__ |Param |Type |Description | |:---|:---| :---| -|`ctx` | _context.Context_ |Request context | +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketName` | _string_ |Name of the bucket | |`objectName` | _string_ |Name of the object | |`filePath` | _string_ |Path to download object to | @@ -516,10 +503,7 @@ __Example__ ```go -ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second) -defer cancel() - -err = minioClient.FGetObjectWithContext(ctx, "mybucket", "myobject", "/tmp/myobject", minio.GetObjectOptions{}) +err = minioClient.FGetObject(context.Background(), "mybucket", "myobject", "/tmp/myobject", minio.GetObjectOptions{}) if err != nil { fmt.Println(err) return @@ -527,14 +511,14 @@ if err != nil { ``` -### PutObject(bucketName, objectName string, reader io.Reader, objectSize int64,opts PutObjectOptions) (n int, err error) -Uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than 64MiB in size, PutObject seamlessly uploads the object as parts of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB. +### PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,opts PutObjectOptions) (info UploadInfo, err error) +Uploads objects that are less than 128MiB in a single PUT operation. For objects that are greater than 128MiB in size, PutObject seamlessly uploads the object as parts of 128MiB or more depending on the actual file size. The max upload size for an object is 5TB. 
__Parameters__ - |Param |Type |Description | |:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketName` | _string_ |Name of the bucket | |`objectName` | _string_ |Name of the object | |`reader` | _io.Reader_ |Any Go type that implements io.Reader | @@ -543,18 +527,35 @@ __Parameters__ __minio.PutObjectOptions__ -|Field | Type | Description | -|:--- |:--- | :--- | -| `opts.UserMetadata` | _map[string]string_ | Map of user metadata| -| `opts.Progress` | _io.Reader_ | Reader to fetch progress of an upload | -| `opts.ContentType` | _string_ | Content type of object, e.g "application/text" | -| `opts.ContentEncoding` | _string_ | Content encoding of object, e.g "gzip" | -| `opts.ContentDisposition` | _string_ | Content disposition of object, "inline" | -| `opts.ContentLanguage` | _string_ | Content language of object, e.g "French" | -| `opts.CacheControl` | _string_ | Used to specify directives for caching mechanisms in both requests and responses e.g "max-age=600"| -| `opts.ServerSideEncryption` | _encrypt.ServerSide_ | Interface provided by `encrypt` package to specify server-side-encryption. (For more information see https://godoc.org/github.com/minio/minio-go) | -| `opts.StorageClass` | _string_ | Specify storage class for the object. Supported values for Minio server are `REDUCED_REDUNDANCY` and `STANDARD` | -| `opts.WebsiteRedirectLocation` | _string_ | Specify a redirect for the object, to another object in the same bucket or to a external URL. 
| +| Field | Type | Description | +|:-------------------------------|:-----------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `opts.UserMetadata` | _map[string]string_ | Map of user metadata | +| `opts.UserTags` | _map[string]string_ | Map of user object tags | +| `opts.Progress` | _io.Reader_ | Reader to fetch progress of an upload | +| `opts.ContentType` | _string_ | Content type of object, e.g "application/text" | +| `opts.ContentEncoding` | _string_ | Content encoding of object, e.g "gzip" | +| `opts.ContentDisposition` | _string_ | Content disposition of object, "inline" | +| `opts.ContentLanguage` | _string_ | Content language of object, e.g "French" | +| `opts.CacheControl` | _string_ | Used to specify directives for caching mechanisms in both requests and responses e.g "max-age=600" | +| `opts.Mode` | _*minio.RetentionMode_ | Retention mode to be set, e.g "COMPLIANCE" | +| `opts.RetainUntilDate` | _*time.Time_ | Time until which the retention applied is valid | +| `opts.ServerSideEncryption` | _encrypt.ServerSide_ | Interface provided by `encrypt` package to specify server-side-encryption. (For more information see https://godoc.org/github.com/minio/minio-go/v7) | +| `opts.StorageClass` | _string_ | Specify storage class for the object. Supported values for MinIO server are `REDUCED_REDUNDANCY` and `STANDARD` | +| `opts.WebsiteRedirectLocation` | _string_ | Specify a redirect for the object, to another object in the same bucket or to a external URL. | +| `opts.SendContentMd5` | _bool_ | Specify if you'd like to send `content-md5` header with PutObject operation. Note that setting this flag will cause higher memory usage because of in-memory `md5sum` calculation. 
|
+| `opts.PartSize` | _uint64_ | Specify a custom part size used for uploading the object |
+| `opts.ReplicationVersionID` | _string_ | Specify VersionID of object to replicate. This option is intended for internal use by MinIO server to extend the replication API implementation by AWS. This option should not be set unless the application is aware of intended use. |
+| `opts.ReplicationStatus` | _minio.ReplicationStatus_ | Specify replication status of object. This option is intended for internal use by MinIO server to extend the replication API implementation by AWS. This option should not be set unless the application is aware of intended use. |
+| `opts.ReplicationMTime` | _time.Time_ | Preserve source modTime on the replicated object. This option is intended for internal use only by MinIO server to comply with AWS bucket replication implementation. This option should not be set unless the application is aware of intended use. |
+
+
+__minio.UploadInfo__
+
+| Field | Type | Description |
+|:--------------------|:---------|:-------------------------------------------|
+| `info.ETag` | _string_ | The ETag of the new object |
+| `info.VersionID` | _string_ | The version identifier of the new object |
+

__Example__


@@ -573,76 +574,38 @@ if err != nil {
 	return
 }

-n, err := minioClient.PutObject("mybucket", "myobject", file, fileStat.Size(), minio.PutObjectOptions{ContentType:"application/octet-stream"})
+uploadInfo, err := minioClient.PutObject(context.Background(), "mybucket", "myobject", file, fileStat.Size(), minio.PutObjectOptions{ContentType:"application/octet-stream"})
 if err != nil {
 	fmt.Println(err)
 	return
 }
-fmt.Println("Successfully uploaded bytes: ", n)
+fmt.Println("Successfully uploaded bytes: ", uploadInfo)
 ```

API methods PutObjectWithSize, PutObjectWithMetadata, PutObjectStreaming, and PutObjectWithProgress available in minio-go SDK release v3.0.3 are replaced by the new PutObject call variant that accepts a pointer to PutObjectOptions struct.
- -### PutObjectWithContext(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, opts PutObjectOptions) (n int, err error) -Identical to PutObject operation, but allows request cancellation. - -__Parameters__ - - -|Param |Type |Description | -|:---|:---| :---| -|`ctx` | _context.Context_ |Request context | -|`bucketName` | _string_ |Name of the bucket | -|`objectName` | _string_ |Name of the object | -|`reader` | _io.Reader_ |Any Go type that implements io.Reader | -|`objectSize`| _int64_ | size of the object being uploaded. Pass -1 if stream size is unknown | -|`opts` | _minio.PutObjectOptions_ |Pointer to struct that allows user to set optional custom metadata, content-type, content-encoding, content-disposition, content-language and cache-control headers, pass encryption module for encrypting objects, and optionally configure number of threads for multipart put operation. | - - -__Example__ - - -```go -ctx, cancel := context.WithTimeout(context.Background(), 10 * time.Second) -defer cancel() - -file, err := os.Open("my-testfile") -if err != nil { - fmt.Println(err) - return -} -defer file.Close() - -fileStat, err := file.Stat() -if err != nil { - fmt.Println(err) - return -} - -n, err := minioClient.PutObjectWithContext(ctx, "my-bucketname", "my-objectname", file, fileStat.Size(), minio.PutObjectOptions{ - ContentType: "application/octet-stream", -}) -if err != nil { - fmt.Println(err) - return -} -fmt.Println("Successfully uploaded bytes: ", n) -``` -### CopyObject(dst DestinationInfo, src SourceInfo) error -Create or replace an object through server-side copying of an existing object. It supports conditional copying, copying a part of an object and server-side encryption of destination and decryption of source. See the `SourceInfo` and `DestinationInfo` types for further details. 
+### CopyObject(ctx context.Context, dst CopyDestOptions, src CopySrcOptions) (UploadInfo, error)
Create or replace an object through server-side copying of an existing object. It supports conditional copying, copying a part of an object and server-side encryption of destination and decryption of source. See the `CopySrcOptions` and `CopyDestOptions` types for further details.

To copy multiple source objects into a single destination object see the `ComposeObject` API.

__Parameters__


+| Param | Type | Description |
+|:------|:------------------------|:----------------------------------------------------|
+| `ctx` | _context.Context_ | Custom context for timeout/cancellation of the call |
+| `dst` | _minio.CopyDestOptions_ | Argument describing the destination object |
+| `src` | _minio.CopySrcOptions_ | Argument describing the source object |

-|Param |Type |Description |
-|:---|:---| :---|
-|`dst` | _minio.DestinationInfo_ |Argument describing the destination object |
-|`src` | _minio.SourceInfo_ |Argument describing the source object |
+
+__minio.UploadInfo__
+
+| Field | Type | Description |
+|:-----------------|:---------|:-----------------------------------------|
+| `info.ETag` | _string_ | The ETag of the new object |
+| `info.VersionID` | _string_ | The version identifier of the new object |


__Example__

@@ -651,21 +614,25 @@ __Example__

```go
// Use-case 1: Simple copy object with no conditions.
// Source object
-src := minio.NewSourceInfo("my-sourcebucketname", "my-sourceobjectname", nil)
+srcOpts := minio.CopySrcOptions{
+    Bucket: "my-sourcebucketname",
+    Object: "my-sourceobjectname",
+}

// Destination object
-dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
-if err != nil {
-    fmt.Println(err)
-    return
+dstOpts := minio.CopyDestOptions{
+    Bucket: "my-bucketname",
+    Object: "my-objectname",
}

// Copy object call
-err = minioClient.CopyObject(dst, src)
+uploadInfo, err := minioClient.CopyObject(context.Background(), dstOpts, srcOpts)
if err != nil {
    fmt.Println(err)
    return
}
+
+fmt.Println("Successfully copied object:", uploadInfo)
```

```go
@@ -677,51 +644,58 @@ if err != nil {
// 4. copy only first 1MiB of object.

// Source object
-src := minio.NewSourceInfo("my-sourcebucketname", "my-sourceobjectname", nil)
-
-// Set matching ETag condition, copy object which matches the following ETag.
-src.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a")
-
-// Set modified condition, copy object modified since 2014 April 1.
-src.SetModifiedSinceCond(time.Date(2014, time.April, 1, 0, 0, 0, 0, time.UTC))
-
-// Set unmodified condition, copy object unmodified since 2014 April 23.
-src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 23, 0, 0, 0, 0, time.UTC))
+srcOpts := minio.CopySrcOptions{
+    Bucket: "my-sourcebucketname",
+    Object: "my-sourceobjectname",
+    MatchETag: "31624deb84149d2f8ef9c385918b653a",
+    MatchModifiedSince: time.Date(2014, time.April, 1, 0, 0, 0, 0, time.UTC),
+    MatchUnmodifiedSince: time.Date(2014, time.April, 23, 0, 0, 0, 0, time.UTC),
+    Start: 0,
+    End: 1024*1024-1,
+}

-// Set copy-range of only first 1MiB of file.
-src.SetRange(0, 1024*1024-1) // Destination object -dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil) -if err != nil { - fmt.Println(err) - return +dstOpts := minio.CopyDestOptions{ + Bucket: "my-bucketname", + Object: "my-objectname", } // Copy object call -err = minioClient.CopyObject(dst, src) +_, err = minioClient.CopyObject(context.Background(), dst, src) if err != nil { fmt.Println(err) return } + +fmt.Println("Successfully copied object:", uploadInfo) + ``` -### ComposeObject(dst minio.DestinationInfo, srcs []minio.SourceInfo) error +### ComposeObject(ctx context.Context, dst minio.CopyDestOptions, srcs ...minio.CopySrcOptions) (UploadInfo, error) Create an object by concatenating a list of source objects using server-side copying. __Parameters__ -|Param |Type |Description | -|:---|:---|:---| -|`dst` | _minio.DestinationInfo_ |Struct with info about the object to be created. | -|`srcs` | _[]minio.SourceInfo_ |Slice of struct with info about source objects to be concatenated in order. | +| Param | Type | Description | +|:-------|:--------------------------|:----------------------------------------------------------------------------| +| `ctx` | _context.Context_ | Custom context for timeout/cancellation of the call | +| `dst` | _minio.CopyDestOptions_ | Struct with info about the object to be created. | +| `srcs` | _...minio.CopySrcOptions_ | Slice of struct with info about source objects to be concatenated in order. | -__Example__ +__minio.UploadInfo__ + +| Field | Type | Description | +|:--------------------|:---------|:-------------------------------------------| +| `info.ETag` | _string_ | The ETag of the new object | +| `info.VersionID` | _string_ | The version identifyer of the new object | +__Example__ + ```go // Prepare source decryption key (here we assume same key to // decrypt all source objects.) 
@@ -729,215 +703,100 @@ sseSrc := encrypt.DefaultPBKDF([]byte("password"), []byte("salt")) // Source objects to concatenate. We also specify decryption // key for each -src1 := minio.NewSourceInfo("bucket1", "object1", sseSrc) -src1.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a") - -src2 := minio.NewSourceInfo("bucket2", "object2", sseSrc) -src2.SetMatchETagCond("f8ef9c385918b653a31624deb84149d2") +src1Opts := minio.CopySrcOptions{ + Bucket: "bucket1", + Object: "object1", + Encryption: sseSrc, + MatchETag: "31624deb84149d2f8ef9c385918b653a", +} -src3 := minio.NewSourceInfo("bucket3", "object3", sseSrc) -src3.SetMatchETagCond("5918b653a31624deb84149d2f8ef9c38") +src2Opts := minio.CopySrcOptions{ + Bucket: "bucket2", + Object: "object2", + Encryption: sseSrc, + MatchETag: "f8ef9c385918b653a31624deb84149d2", +} -// Create slice of sources. -srcs := []minio.SourceInfo{src1, src2, src3} +src3Opts := minio.CopySrcOptions{ + Bucket: "bucket3", + Object: "object3", + Encryption: sseSrc, + MatchETag: "5918b653a31624deb84149d2f8ef9c38", +} // Prepare destination encryption key sseDst := encrypt.DefaultPBKDF([]byte("new-password"), []byte("new-salt")) // Create destination info -dst, err := minio.NewDestinationInfo("bucket", "object", sseDst, nil) -if err != nil { - fmt.Println(err) - return +dstOpts := CopyDestOptions{ + Bucket: "bucket", + Object: "object", + Encryption: sseDst, } // Compose object call by concatenating multiple source files. -err = minioClient.ComposeObject(dst, srcs) +uploadInfo, err := minioClient.ComposeObject(context.Background(), dst, srcs...) if err != nil { fmt.Println(err) return } -fmt.Println("Composed object successfully.") +fmt.Println("Composed object successfully:", uploadInfo) ``` - -### NewSourceInfo(bucket, object string, decryptSSEC *SSEInfo) SourceInfo -Construct a `SourceInfo` object that can be used as the source for server-side copying operations like `CopyObject` and `ComposeObject`. 
This object can be used to set copy-conditions on the source. + +### FPutObject(ctx context.Context, bucketName, objectName, filePath, opts PutObjectOptions) (info UploadInfo, err error) +Uploads contents from a file to objectName. + +FPutObject uploads objects that are less than 128MiB in a single PUT operation. For objects that are greater than the 128MiB in size, FPutObject seamlessly uploads the object in chunks of 128MiB or more depending on the actual file size. The max upload size for an object is 5TB. __Parameters__ -| Param | Type | Description | -| :--- | :--- | :--- | -| `bucket` | _string_ | Name of the source bucket | -| `object` | _string_ | Name of the source object | -| `sse` | _*encrypt.ServerSide_ | Interface provided by `encrypt` package to specify server-side-encryption. (For more information see https://godoc.org/github.com/minio/minio-go) | -__Example__ +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +|`bucketName` | _string_ |Name of the bucket | +|`objectName` | _string_ |Name of the object | +|`filePath` | _string_ |Path to file to be uploaded | +|`opts` | _minio.PutObjectOptions_ |Pointer to struct that allows user to set optional custom metadata, content-type, content-encoding, content-disposition, content-language and cache-control headers, pass encryption module for encrypting objects, and optionally configure number of threads for multipart put operation. | -```go -// No decryption parameter. 
-src := minio.NewSourceInfo("bucket", "object", nil) -// Destination object -dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil) -if err != nil { - fmt.Println(err) - return -} +__minio.UploadInfo__ -// Copy object call -err = minioClient.CopyObject(dst, src) -if err != nil { - fmt.Println(err) - return -} -``` +| Field | Type | Description | +|:--------------------|:---------|:-------------------------------------------| +| `info.ETag` | _string_ | The ETag of the new object | +| `info.VersionID` | _string_ | The version identifyer of the new object | -```go -// With decryption parameter. -sseSrc := encrypt.DefaultPBKDF([]byte("password"), []byte("salt")) -src := minio.NewSourceInfo("bucket", "object", sseSrc) -// Destination object -dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil) -if err != nil { - fmt.Println(err) - return -} +__Example__ -// Copy object call -err = minioClient.CopyObject(dst, src) + +```go +uploadInfo, err := minioClient.FPutObject(context.Background(), "my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{ + ContentType: "application/csv", +}); if err != nil { fmt.Println(err) return } +fmt.Println("Successfully uploaded object: ", uploadInfo) ``` - -### NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo, userMeta map[string]string) (DestinationInfo, error) -Construct a `DestinationInfo` object that can be used as the destination object for server-side copying operations like `CopyObject` and `ComposeObject`. + +### StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) +Fetch metadata of an object. __Parameters__ -| Param | Type | Description | -| :--- | :--- | :--- | -| `bucket` | _string_ | Name of the destination bucket | -| `object` | _string_ | Name of the destination object | -| `sse` | _*encrypt.ServerSide_ | Interface provided by `encrypt` package to specify server-side-encryption. 
(For more information see https://godoc.org/github.com/minio/minio-go) | | -| `userMeta` | _map[string]string_ | User metadata to be set on the destination. If nil, with only one source, user-metadata is copied from source. | -__Example__ - -```go -// No encryption parameter. -src := minio.NewSourceInfo("bucket", "object", nil) -dst, err := minio.NewDestinationInfo("bucket", "object", nil, nil) -if err != nil { - fmt.Println(err) - return -} - -// Copy object call -err = minioClient.CopyObject(dst, src) -if err != nil { - fmt.Println(err) - return -} -``` - -```go -src := minio.NewSourceInfo("bucket", "object", nil) - -// With encryption parameter. -sseDst := encrypt.DefaultPBKDF([]byte("password"), []byte("salt")) -dst, err := minio.NewDestinationInfo("bucket", "object", sseDst, nil) -if err != nil { - fmt.Println(err) - return -} - -// Copy object call -err = minioClient.CopyObject(dst, src) -if err != nil { - fmt.Println(err) - return -} -``` - - -### FPutObject(bucketName, objectName, filePath, opts PutObjectOptions) (length int64, err error) -Uploads contents from a file to objectName. - -FPutObject uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than the 64MiB in size, FPutObject seamlessly uploads the object in chunks of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB. - -__Parameters__ - - -|Param |Type |Description | -|:---|:---| :---| -|`bucketName` | _string_ |Name of the bucket | -|`objectName` | _string_ |Name of the object | -|`filePath` | _string_ |Path to file to be uploaded | -|`opts` | _minio.PutObjectOptions_ |Pointer to struct that allows user to set optional custom metadata, content-type, content-encoding, content-disposition, content-language and cache-control headers, pass encryption module for encrypting objects, and optionally configure number of threads for multipart put operation. 
| - - -__Example__ - - -```go -n, err := minioClient.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{ - ContentType: "application/csv", -}); -if err != nil { - fmt.Println(err) - return -} -fmt.Println("Successfully uploaded bytes: ", n) -``` - - -### FPutObjectWithContext(ctx context.Context, bucketName, objectName, filePath, opts PutObjectOptions) (length int64, err error) -Identical to FPutObject operation, but allows request cancellation. - -__Parameters__ - - -|Param |Type |Description | -|:---|:---| :---| -|`ctx` | _context.Context_ |Request context | -|`bucketName` | _string_ |Name of the bucket | -|`objectName` | _string_ |Name of the object | -|`filePath` | _string_ |Path to file to be uploaded | -|`opts` | _minio.PutObjectOptions_ |Pointer to struct that allows user to set optional custom metadata, content-type, content-encoding,content-disposition and cache-control headers, pass encryption module for encrypting objects, and optionally configure number of threads for multipart put operation. | - -__Example__ - - -```go -ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second) -defer cancel() - -n, err := minioClient.FPutObjectWithContext(ctx, "mybucket", "myobject.csv", "/tmp/otherobject.csv", minio.PutObjectOptions{ContentType:"application/csv"}) -if err != nil { - fmt.Println(err) - return -} -fmt.Println("Successfully uploaded bytes: ", n) -``` - - -### StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) -Fetch metadata of an object. 
- -__Parameters__ - - -|Param |Type |Description | -|:---|:---| :---| -|`bucketName` | _string_ |Name of the bucket | -|`objectName` | _string_ |Name of the object | -|`opts` | _minio.StatObjectOptions_ | Options for GET info/stat requests specifying additional options like encryption, If-Match | +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +|`bucketName` | _string_ |Name of the bucket | +|`objectName` | _string_ |Name of the object | +|`opts` | _minio.StatObjectOptions_ | Options for GET info/stat requests specifying additional options like encryption, If-Match | __Return Value__ @@ -961,7 +820,7 @@ __Example__ ```go -objInfo, err := minioClient.StatObject("mybucket", "myobject", minio.StatObjectOptions{}) +objInfo, err := minioClient.StatObject(context.Background(), "mybucket", "myobject", minio.StatObjectOptions{}) if err != nil { fmt.Println(err) return @@ -970,37 +829,70 @@ fmt.Println(objInfo) ``` -### RemoveObject(bucketName, objectName string) error -Removes an object. 
+### RemoveObject(ctx context.Context, bucketName, objectName string, opts minio.RemoveObjectOptions) error +Removes an object with some specified options __Parameters__ |Param |Type |Description | |:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketName` | _string_ |Name of the bucket | |`objectName` | _string_ |Name of the object | +|`opts` |_minio.RemoveObjectOptions_ |Allows user to set options | + +__minio.RemoveObjectOptions__ + +|Field | Type | Description | +|:--- |:--- | :--- | +| `opts.GovernanceBypass` | _bool_ |Set the bypass governance header to delete an object locked with GOVERNANCE mode| +| `opts.VersionID` | _string_ |Version ID of the object to delete| ```go -err = minioClient.RemoveObject("mybucket", "myobject") +opts := minio.RemoveObjectOptions { + GovernanceBypass: true, + VersionID: "myversionid", + } +err = minioClient.RemoveObject(context.Background(), "mybucket", "myobject", opts) if err != nil { fmt.Println(err) return } ``` + +### PutObjectRetention(ctx context.Context, bucketName, objectName string, opts minio.PutObjectRetentionOptions) error +Applies object retention lock onto an object. + +__Parameters__ + + +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +|`bucketName` | _string_ |Name of the bucket | +|`objectName` | _string_ |Name of the object | +|`opts` |_minio.PutObjectRetentionOptions_ |Allows user to set options like retention mode, expiry date and version id | -### RemoveObjects(bucketName string, objectsCh chan string) (errorCh <-chan RemoveObjectError) +### RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan string, opts RemoveObjectsOptions) <-chan RemoveObjectError Removes a list of objects obtained from an input channel. The call sends a delete request to the server up to 1000 objects at a time. The errors observed are sent over the error channel. 
-__Parameters__ +__Parameters__ |Param |Type |Description | |:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketName` | _string_ |Name of the bucket | -|`objectsCh` | _chan string_ | Channel of objects to be removed | +|`objectsCh` | _chan string_ | Channel of objects to be removed | +|`opts` |_minio.RemoveObjectsOptions_ | Allows user to set options | + +__minio.RemoveObjectsOptions__ +|Field | Type | Description | +|:--- |:--- | :--- | +| `opts.GovernanceBypass` | _bool_ |Set the bypass governance header to delete an object locked with GOVERNANCE mode| __Return Values__ @@ -1008,7 +900,6 @@ __Return Values__ |:---|:---| :---| |`errorCh` | _<-chan minio.RemoveObjectError_ | Receive-only channel of errors observed during deletion. | - ```go objectsCh := make(chan string) @@ -1016,7 +907,7 @@ objectsCh := make(chan string) go func() { defer close(objectsCh) // List all objects from a bucket-name with a matching prefix. - for object := range minioClient.ListObjects("my-bucketname", "my-prefixname", true, nil) { + for object := range minioClient.ListObjects(context.Background(), "my-bucketname", "my-prefixname", true, nil) { if object.Err != nil { log.Fatalln(object.Err) } @@ -1024,49 +915,87 @@ go func() { } }() -for rErr := range minioClient.RemoveObjects("mybucket", objectsCh) { +opts := minio.RemoveObjectsOptions{ + GovernanceBypass: true, +} + +for rErr := range minioClient.RemoveObjects(context.Background(), "my-bucketname", objectsCh, opts) { fmt.Println("Error detected during deletion: ", rErr) } ``` - -### RemoveObjectsWithContext(ctx context.Context, bucketName string, objectsCh chan string) (errorCh <-chan RemoveObjectError) -*Identical to RemoveObjects operation, but accepts a context for request cancellation.* + +### GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) +Returns retention set on a given object.
+ +__Parameters__ -Parameters |Param |Type |Description | |:---|:---| :---| -|`ctx` | _context.Context_ |Request context | +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketName` | _string_ |Name of the bucket | -|`objectsCh` | _chan string_ | Channel of objects to be removed | +|`objectName` | _string_ |Name of the object | +|`versionID` |_string_ |Version ID of the object | + +```go +mode, retainUntilDate, err := minioClient.GetObjectRetention(context.Background(), "mybucket", "myobject", "") +if err != nil { + fmt.Println(err) + return +} +``` + +### PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts minio.PutObjectLegalHoldOptions) error +Applies legal-hold onto an object. +__Parameters__ -__Return Values__ |Param |Type |Description | |:---|:---| :---| -|`errorCh` | _<-chan minio.RemoveObjectError_ | Receive-only channel of errors observed during deletion. | +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +|`bucketName` | _string_ |Name of the bucket | +|`objectName` | _string_ |Name of the object | +|`opts` |_minio.PutObjectLegalHoldOptions_ |Allows user to set options like status and version id | + +_minio.PutObjectLegalHoldOptions_ + +|Field | Type | Description | +|:--- |:--- | :--- | +| `opts.Status` | _*minio.LegalHoldStatus_ |Legal-Hold status to be set| +| `opts.VersionID` | _string_ |Version ID of the object to apply retention on| ```go -objectsCh := make(chan string) -ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second) -defer cancel() +s := minio.LegalHoldEnabled +opts := minio.PutObjectLegalHoldOptions { + Status: &s, +} +err = minioClient.PutObjectLegalHold(context.Background(), "mybucket", "myobject", opts) +if err != nil { + fmt.Println(err) + return +} +``` + +### GetObjectLegalHold(ctx context.Context, bucketName, objectName, versionID string) (status *LegalHoldStatus, err error) +Returns legal-hold status on a given object.
-// Send object names that are needed to be removed to objectsCh -go func() { - defer close(objectsCh) - // List all objects from a bucket-name with a matching prefix. - for object := range minioClient.ListObjects("my-bucketname", "my-prefixname", true, nil) { - if object.Err != nil { - log.Fatalln(object.Err) - } - objectsCh <- object.Key - } -}() +__Parameters__ -for rErr := range minioClient.RemoveObjects(ctx, "my-bucketname", objectsCh) { - fmt.Println("Error detected during deletion: ", rErr) +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +|`bucketName` | _string_ |Name of the bucket | +|`objectName` | _string_ |Name of the object | +|`opts` |_minio.GetObjectLegalHoldOptions_ |Allows user to set options like version id | + +```go +opts := minio.GetObjectLegalHoldOptions{} +err = minioClient.GetObjectLegalHold(context.Background(), "mybucket", "myobject", opts) +if err != nil { + fmt.Println(err) + return } ``` @@ -1075,6 +1004,7 @@ Parameters |Param |Type |Description | |:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`ctx` | _context.Context_ |Request context | |`bucketName` | _string_ |Name of the bucket | |`objectName` | _string_ |Name of the object | @@ -1088,10 +1018,10 @@ __Return Values__ ```go // Initialize minio client object. - minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL) - if err != nil { - log.Fatalln(err) - } + minioClient, err := minio.New(endpoint, &minio.Options{ + Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), + Secure: useSSL, + }) opts := minio.SelectObjectOptions{ Expression: "select count(*) from s3object", @@ -1123,8 +1053,82 @@ __Return Values__ } ``` + +### PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags) error +set new object Tags to the given object, replaces/overwrites any existing tags. 
+ +__Parameters__ + + +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +|`bucketName` | _string_ |Name of the bucket | +|`objectName` | _string_ |Name of the object | +|`objectTags` | _*tags.Tags_ | Map with Object Tag's Key and Value | + +__Example__ + + +```go +err = minioClient.PutObjectTagging(context.Background(), bucketName, objectName, objectTags) +if err != nil { + fmt.Println(err) + return +} +``` + + +### GetObjectTagging(ctx context.Context, bucketName, objectName string) (*tags.Tags, error) +Fetch Object Tags from the given object + +__Parameters__ + + +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +|`bucketName` | _string_ |Name of the bucket | +|`objectName` | _string_ |Name of the object | + +__Example__ + + +```go +tags, err = minioClient.GetObjectTagging(context.Background(), bucketName, objectName) +if err != nil { + fmt.Println(err) + return +} +fmt.Printf("Fetched Tags: %s", tags) +``` + + +### RemoveObjectTagging(ctx context.Context, bucketName, objectName string) error +Remove Object Tags from the given object + +__Parameters__ + + +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +|`bucketName` | _string_ |Name of the bucket | +|`objectName` | _string_ |Name of the object | + +__Example__ + + +```go +err = minioClient.RemoveObjectTagging(context.Background(), bucketName, objectName) +if err != nil { + fmt.Println(err) + return +} +``` + -### RemoveIncompleteUpload(bucketName, objectName string) error +### RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error Removes a partially uploaded object. 
__Parameters__ @@ -1132,6 +1136,7 @@ __Parameters__ |Param |Type |Description | |:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketName` | _string_ |Name of the bucket | |`objectName` | _string_ |Name of the object | @@ -1139,7 +1144,7 @@ __Example__ ```go -err = minioClient.RemoveIncompleteUpload("mybucket", "myobject") +err = minioClient.RemoveIncompleteUpload(context.Background(), "mybucket", "myobject") if err != nil { fmt.Println(err) return @@ -1149,7 +1154,7 @@ if err != nil { ## 5. Presigned operations -### PresignedGetObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error) +### PresignedGetObject(ctx context.Context, bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error) Generates a presigned URL for HTTP GET operations. Browsers/Mobile clients may point to this URL to directly download objects even if the bucket is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days. __Parameters__ @@ -1157,6 +1162,7 @@ __Parameters__ |Param |Type |Description | |:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketName` | _string_ |Name of the bucket | |`objectName` | _string_ |Name of the object | |`expiry` | _time.Duration_ |Expiry of presigned URL in seconds | @@ -1172,7 +1178,7 @@ reqParams := make(url.Values) reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"") // Generates a presigned url which expires in a day. 
-presignedURL, err := minioClient.PresignedGetObject("mybucket", "myobject", time.Second * 24 * 60 * 60, reqParams) +presignedURL, err := minioClient.PresignedGetObject(context.Background(), "mybucket", "myobject", time.Second * 24 * 60 * 60, reqParams) if err != nil { fmt.Println(err) return @@ -1181,7 +1187,7 @@ fmt.Println("Successfully generated presigned URL", presignedURL) ``` -### PresignedPutObject(bucketName, objectName string, expiry time.Duration) (*url.URL, error) +### PresignedPutObject(ctx context.Context, bucketName, objectName string, expiry time.Duration) (*url.URL, error) Generates a presigned URL for HTTP PUT operations. Browsers/Mobile clients may point to this URL to upload objects directly to a bucket even if it is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days. NOTE: you can upload to S3 only with specified object name. @@ -1191,6 +1197,7 @@ __Parameters__ |Param |Type |Description | |:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketName` | _string_ |Name of the bucket | |`objectName` | _string_ |Name of the object | |`expiry` | _time.Duration_ |Expiry of presigned URL in seconds | @@ -1202,7 +1209,7 @@ __Example__ ```go // Generates a url which expires in a day. expiry := time.Second * 24 * 60 * 60 // 1 day. 
-presignedURL, err := minioClient.PresignedPutObject("mybucket", "myobject", expiry) +presignedURL, err := minioClient.PresignedPutObject(context.Background(), "mybucket", "myobject", expiry) if err != nil { fmt.Println(err) return @@ -1211,13 +1218,14 @@ fmt.Println("Successfully generated presigned URL", presignedURL) ``` -### PresignedHeadObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error) +### PresignedHeadObject(ctx context.Context, bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error) Generates a presigned URL for HTTP HEAD operations. Browsers/Mobile clients may point to this URL to directly get metadata from objects even if the bucket is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days. __Parameters__ |Param |Type |Description | |:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketName` | _string_ |Name of the bucket | |`objectName` | _string_ |Name of the object | |`expiry` | _time.Duration_ |Expiry of presigned URL in seconds | @@ -1233,7 +1241,7 @@ reqParams := make(url.Values) reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"") // Generates a presigned url which expires in a day. 
-presignedURL, err := minioClient.PresignedHeadObject("mybucket", "myobject", time.Second * 24 * 60 * 60, reqParams) +presignedURL, err := minioClient.PresignedHeadObject(context.Background(), "mybucket", "myobject", time.Second * 24 * 60 * 60, reqParams) if err != nil { fmt.Println(err) return @@ -1242,7 +1250,7 @@ fmt.Println("Successfully generated presigned URL", presignedURL) ``` -### PresignedPostPolicy(PostPolicy) (*url.URL, map[string]string, error) +### PresignedPostPolicy(ctx context.Context, post PostPolicy) (*url.URL, map[string]string, error) Allows setting policy conditions to a presigned URL for POST operations. Policies such as bucket name to receive object uploads, key name prefixes, expiry policy may be set. ```go @@ -1264,7 +1272,7 @@ policy.SetContentLengthRange(1024, 1024*1024) policy.SetUserMetadata("custom", "user") // Get the POST form key/value object: -url, formData, err := minioClient.PresignedPostPolicy(policy) +url, formData, err := minioClient.PresignedPostPolicy(context.Background(), policy) if err != nil { fmt.Println(err) return @@ -1282,13 +1290,14 @@ fmt.Printf("%s\n", url) ## 6. Bucket policy/notification operations -### SetBucketPolicy(bucketname, policy string) error +### SetBucketPolicy(ctx context.Context, bucketname, policy string) error Set access permissions on bucket or an object prefix. 
__Parameters__ |Param |Type |Description | |:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketName` | _string_ |Name of the bucket| |`policy` | _string_ |Policy to be set | @@ -1303,7 +1312,7 @@ __Example__ ```go policy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:GetObject"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::my-bucketname/*"],"Sid": ""}]}` -err = minioClient.SetBucketPolicy("my-bucketname", policy) +err = minioClient.SetBucketPolicy(context.Background(), "my-bucketname", policy) if err != nil { fmt.Println(err) return @@ -1311,7 +1320,7 @@ if err != nil { ``` -### GetBucketPolicy(bucketName) (policy string, error) +### GetBucketPolicy(ctx context.Context, bucketName string) (policy string, error) Get access permissions on a bucket or a prefix. __Parameters__ @@ -1319,6 +1328,7 @@ __Parameters__ |Param |Type |Description | |:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketName` | _string_ |Name of the bucket | __Return Values__ @@ -1332,14 +1342,14 @@ __Return Values__ __Example__ ```go -policy, err := minioClient.GetBucketPolicy("my-bucketname") +policy, err := minioClient.GetBucketPolicy(context.Background(), "my-bucketname") if err != nil { log.Fatalln(err) } ``` -### GetBucketNotification(bucketName string) (BucketNotification, error) +### GetBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) Get notification configuration on a bucket. 
__Parameters__ @@ -1347,6 +1357,7 @@ __Parameters__ |Param |Type |Description | |:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketName` | _string_ |Name of the bucket | __Return Values__ @@ -1354,14 +1365,14 @@ __Return Values__ |Param |Type |Description | |:---|:---| :---| -|`bucketNotification` | _minio.BucketNotification_ |structure which holds all notification configurations| +|`config` | _notification.Configuration_ |structure which holds all notification configurations| |`err` | _error_ |Standard Error | __Example__ ```go -bucketNotification, err := minioClient.GetBucketNotification("mybucket") +bucketNotification, err := minioClient.GetBucketNotification(context.Background(), "mybucket") if err != nil { fmt.Println("Failed to get bucket notification configurations for mybucket", err) return @@ -1375,7 +1386,7 @@ for _, queueConfig := range bucketNotification.QueueConfigs { ``` -### SetBucketNotification(bucketName string, bucketNotification BucketNotification) error +### SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error Set a new bucket notification on a bucket. 
__Parameters__ @@ -1383,8 +1394,9 @@ __Parameters__ |Param |Type |Description | |:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketName` | _string_ |Name of the bucket | -|`bucketNotification` | _minio.BucketNotification_ |Represents the XML to be sent to the configured web service | +|`config` | _notification.Configuration_ |Represents the XML to be sent to the configured web service | __Return Values__ @@ -1397,17 +1409,17 @@ __Example__ ```go -queueArn := minio.NewArn("aws", "sqs", "us-east-1", "804605494417", "PhotoUpdate") +queueArn := notification.NewArn("aws", "sqs", "us-east-1", "804605494417", "PhotoUpdate") -queueConfig := minio.NewNotificationConfig(queueArn) +queueConfig := notification.NewConfig(queueArn) queueConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll) queueConfig.AddFilterPrefix("photos/") queueConfig.AddFilterSuffix(".jpg") -bucketNotification := minio.BucketNotification{} -bucketNotification.AddQueue(queueConfig) +config := notification.Configuration{} +config.AddQueue(queueConfig) -err = minioClient.SetBucketNotification("mybucket", bucketNotification) +err = minioClient.SetBucketNotification(context.Background(), "mybucket", config) if err != nil { fmt.Println("Unable to set the bucket notification: ", err) return @@ -1415,7 +1427,7 @@ if err != nil { ``` -### RemoveAllBucketNotification(bucketName string) error +### RemoveAllBucketNotification(ctx context.Context, bucketName string) error Remove all configured bucket notifications on a bucket. 
__Parameters__ @@ -1423,6 +1435,7 @@ __Parameters__ |Param |Type |Description | |:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketName` | _string_ |Name of the bucket | __Return Values__ @@ -1436,7 +1449,7 @@ __Example__ ```go -err = minioClient.RemoveAllBucketNotification("mybucket") +err = minioClient.RemoveAllBucketNotification(context.Background(), "mybucket") if err != nil { fmt.Println("Unable to remove bucket notifications.", err) return @@ -1444,7 +1457,7 @@ if err != nil { ``` -### ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo +### ListenBucketNotification(context context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info ListenBucketNotification API receives bucket notification events through the notification channel. The returned notification channel has two fields 'Records' and 'Err'. - 'Records' holds the notifications received from the server. 
@@ -1461,18 +1474,17 @@ __Parameters__ |`prefix` | _string_ | Object key prefix to filter notifications for | |`suffix` | _string_ | Object key suffix to filter notifications for | |`events` | _[]string_ | Enables notifications for specific event types | -|`doneCh` | _chan struct{}_ | A message on this channel ends the ListenBucketNotification iterator | __Return Values__ |Param |Type |Description | |:---|:---| :---| -|`notificationInfo` | _chan minio.NotificationInfo_ | Channel of bucket notifications | +|`notificationInfo` | _chan notification.Info_ | Channel of bucket notifications | __minio.NotificationInfo__ |Field |Type |Description | -|`notificationInfo.Records` | _[]minio.NotificationEvent_ | Collection of notification events | +|`notificationInfo.Records` | _[]notification.Event_ | Collection of notification events | |`notificationInfo.Err` | _error_ | Carries any error occurred during the operation (Standard Error) | @@ -1480,18 +1492,62 @@ __Example__ ```go -// Create a done channel to control 'ListenBucketNotification' go routine. -doneCh := make(chan struct{}) +// Listen for bucket notifications on "mybucket" filtered by prefix, suffix and events. +for notificationInfo := range minioClient.ListenBucketNotification(context.Background(), "mybucket", "myprefix/", ".mysuffix", []string{ + "s3:ObjectCreated:*", + "s3:ObjectAccessed:*", + "s3:ObjectRemoved:*", + }) { + if notificationInfo.Err != nil { + fmt.Println(notificationInfo.Err) + } + fmt.Println(notificationInfo) +} +``` + + +### ListenNotification(context context.Context, prefix, suffix string, events []string) <-chan notification.Info +ListenNotification API receives bucket and object notification events through the notification channel. The returned notification channel has two fields 'Records' and 'Err'. + +- 'Records' holds the notifications received from the server. +- 'Err' indicates any error while processing the received notifications. 
+ +NOTE: Notification channel is closed at the first occurrence of an error. + +__Parameters__ + + +|Param |Type |Description | +|:---|:---| :---| +|`bucketName` | _string_ | Bucket to listen notifications on | +|`prefix` | _string_ | Object key prefix to filter notifications for | +|`suffix` | _string_ | Object key suffix to filter notifications for | +|`events` | _[]string_ | Enables notifications for specific event types | + +__Return Values__ + +|Param |Type |Description | +|:---|:---| :---| +|`notificationInfo` | _chan notification.Info_ | Read channel for all notifications | + +__minio.NotificationInfo__ + +|Field |Type |Description | +|`notificationInfo.Records` | _[]notification.Event_ | Collection of notification events | +|`notificationInfo.Err` | _error_ | Carries any error occurred during the operation (Standard Error) | + +__Example__ -// Indicate a background go-routine to exit cleanly upon return. -defer close(doneCh) +```go // Listen for bucket notifications on "mybucket" filtered by prefix, suffix and events. -for notificationInfo := range minioClient.ListenBucketNotification("mybucket", "myprefix/", ".mysuffix", []string{ +for notificationInfo := range minioClient.ListenNotification(context.Background(), "myprefix/", ".mysuffix", []string{ + "s3:BucketCreated:*", + "s3:BucketRemoved:*", "s3:ObjectCreated:*", "s3:ObjectAccessed:*", "s3:ObjectRemoved:*", - }, doneCh) { + }) { if notificationInfo.Err != nil { fmt.Println(notificationInfo.Err) } @@ -1500,15 +1556,16 @@ for notificationInfo := range minioClient.ListenBucketNotification("mybucket", " ``` -### SetBucketLifecycle(bucketname, lifecycle string) error +### SetBucketLifecycle(ctx context.Context, bucketname, config *lifecycle.Configuration) error Set lifecycle on bucket or an object prefix. 
__Parameters__ |Param |Type |Description | |:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketName` | _string_ |Name of the bucket| -|`lifecycle` | _string_ |Lifecycle to be set | +|`config` | _lifecycle.Configuration_ |Lifecycle to be set | __Return Values__ @@ -1519,18 +1576,18 @@ __Return Values__ __Example__ ```go -lifecycle := ` - - expire-bucket - - Enabled - - 365 - - -` - -err = minioClient.SetBucketLifecycle("my-bucketname", lifecycle) +config := lifecycle.NewConfiguration() +config.Rules = []lifecycle.Rule{ + { + ID: "expire-bucket", + Status: "Enabled", + Expiration: lifecycle.Expiration{ + Days: 365, + }, + }, +} + +err = minioClient.SetBucketLifecycle(context.Background(), "my-bucketname", config) if err != nil { fmt.Println(err) return @@ -1538,7 +1595,7 @@ if err != nil { ``` -### GetBucketLifecycle(bucketName) (lifecycle string, error) +### GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration error) Get lifecycle on a bucket or a prefix. __Parameters__ @@ -1546,6 +1603,7 @@ __Parameters__ |Param |Type |Description | |:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| |`bucketName` | _string_ |Name of the bucket | __Return Values__ @@ -1553,22 +1611,418 @@ __Return Values__ |Param |Type |Description | |:---|:---| :---| -|`lifecycle` | _string_ |Lifecycle returned from the server | +|`config` | _lifecycle.Configuration_ |Lifecycle returned from the server | |`err` | _error_ |Standard Error | __Example__ ```go -lifecycle, err := minioClient.GetBucketLifecycle("my-bucketname") +lifecycle, err := minioClient.GetBucketLifecycle(context.Background(), "my-bucketname") if err != nil { log.Fatalln(err) } ``` -## 7. Client custom settings + +### SetBucketEncryption(ctx context.Context, bucketname string, config sse.Configuration) error +Set default encryption configuration on a bucket. 
- -### SetAppInfo(appName, appVersion string) +__Parameters__ + +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +|`bucketName` | _string_ |Name of the bucket| +|`config` | _sse.Configuration_ | Structure that holds default encryption configuration to be set | + +__Return Values__ + +|Param |Type |Description | +|:---|:---| :---| +|`err` | _error_ |Standard Error | + +__Example__ + +```go +s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, +}) +if err != nil { + log.Fatalln(err) +} + +// Set default encryption configuration on an S3 bucket +err = s3Client.SetBucketEncryption(context.Background(), "my-bucketname", sse.NewConfigurationSSES3()) +if err != nil { + log.Fatalln(err) +} +``` + + +### GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) +Get default encryption configuration set on a bucket. 
+ +__Parameters__ + + +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +|`bucketName` | _string_ |Name of the bucket | + +__Return Values__ + + +|Param |Type |Description | +|:---|:---| :---| +|`config` | _sse.Configuration_ | Structure that holds default encryption configuration | +|`err` | _error_ |Standard Error | + +__Example__ + +```go +s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, +}) +if err != nil { + log.Fatalln(err) +} + +// Get default encryption configuration set on an S3 bucket and print it out +encryptionConfig, err := s3Client.GetBucketEncryption(context.Background(), "my-bucketname") +if err != nil { + log.Fatalln(err) +} +fmt.Printf("%+v\n", encryptionConfig) +``` + + +### RemoveBucketEncryption(ctx context.Context, bucketName string) (error) +Remove default encryption configuration set on a bucket. + +__Parameters__ + + +|Param |Type |Description | +|:---|:---|:---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +|`bucketName` | _string_ |Name of the bucket | + +__Return Values__ + + +|Param |Type |Description | +|:---|:---| :---| +|`err` | _error_ |Standard Error | + +__Example__ + +```go +err := s3Client.RemoveBucketEncryption(context.Background(), "my-bucketname") +if err != nil { + log.Fatalln(err) +} +// "my-bucket" is successfully deleted/removed. +``` + + +### SetObjectLockConfig(ctx context.Context, bucketname, mode *RetentionMode, validity *uint, unit *ValidityUnit) error +Set object lock configuration in given bucket. mode, validity and unit are either all set or all nil. 
+ +__Parameters__ + +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +|`bucketName` | _string_ |Name of the bucket| +|`mode` | _RetentionMode_ |Retention mode to be set | +|`validity` | _uint_ |Validity period to be set | +|`unit` | _ValidityUnit_ |Unit of validity period | + +__Return Values__ + +|Param |Type |Description | +|:---|:---| :---| +|`err` | _error_ |Standard Error | + +__Example__ + +```go +mode := Governance +validity := uint(30) +unit := Days + +err = minioClient.SetObjectLockConfig(context.Background(), "my-bucketname", &mode, &validity, &unit) +if err != nil { + fmt.Println(err) + return +} +``` + + +### GetObjectLockConfig(ctx context.Context, bucketName string) (objectLock, *RetentionMode, *uint, *ValidityUnit, error) +Get object lock configuration of given bucket. + +__Parameters__ + + +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +|`bucketName` | _string_ |Name of the bucket | + +__Return Values__ + + +|Param |Type |Description | +|:---|:---| :---| +|`objectLock` | _objectLock_ |lock enabled status | +|`mode` | _RetentionMode_ |Current retention mode | +|`validity` | _uint_ |Current validity period | +|`unit` | _ValidityUnit_ |Unit of validity period | +|`err` | _error_ |Standard Error | + +__Example__ + +```go +enabled, mode, validity, unit, err := minioClient.GetObjectLockConfig(context.Background(), "my-bucketname") +if err != nil { + log.Fatalln(err) +} +fmt.Printf("object lock is %s for this bucket\n", enabled) +if mode != nil { + fmt.Printf("%v mode is enabled for %v %v for bucket 'my-bucketname'\n", *mode, *validity, *unit) +} else { + fmt.Println("No mode is enabled for bucket 'my-bucketname'") +} +``` + + +### EnableVersioning(ctx context.Context, bucketName string) error +Enable bucket versioning support.
+ +__Parameters__ + + +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +|`bucketName` | _string_ |Name of the bucket | + +__Return Values__ + + +|Param |Type |Description | +|:---|:---| :---| +|`err` | _error_ |Standard Error | + +__Example__ + +```go +err := minioClient.EnableVersioning(context.Background(), "my-bucketname") +if err != nil { + log.Fatalln(err) +} + +fmt.Println("versioning enabled for bucket 'my-bucketname'") +``` + + +### DisableVersioning(ctx context.Context, bucketName) error +Disable bucket versioning support. + +__Parameters__ + + +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +|`bucketName` | _string_ |Name of the bucket | + +__Return Values__ + + +|Param |Type |Description | +|:---|:---| :---| +|`err` | _error_ |Standard Error | + +__Example__ + +```go +err := minioClient.DisableVersioning(context.Background(), "my-bucketname") +if err != nil { + log.Fatalln(err) +} + +fmt.Println("versioning disabled for bucket 'my-bucketname'") +``` + + +### GetBucketVersioning(ctx context.Context, bucketName string) (BucketVersioningConfiguration, error) +Get versioning configuration set on a bucket. 
+ +__Parameters__ + + +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +|`bucketName` | _string_ |Name of the bucket | + +__Return Values__ + + +|Param |Type |Description | +|:---|:---| :---| +|`configuration` | _minio.BucketVersioningConfiguration_ | Structure that holds versioning configuration | +|`err` | _error_ |Standard Error | + +__Example__ + +```go +s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, +}) +if err != nil { + log.Fatalln(err) +} + +// Get versioning configuration set on an S3 bucket and print it out +versioningConfig, err := s3Client.GetBucketVersioning(context.Background(), "my-bucketname") +if err != nil { + log.Fatalln(err) +} +fmt.Printf("%+v\n", versioningConfig) +``` + + + +### SetBucketReplication(ctx context.Context, bucketname, cfg replication.Config) error +Set replication configuration on a bucket. Role can be obtained by first defining the replication target on MinIO using `mc admin bucket remote set` to associate the source and destination buckets for replication with the replication endpoint. + +__Parameters__ + +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +|`bucketName` | _string_ |Name of the bucket| +|`cfg` | _replication.Config_ |Replication configuration to be set | + +__Return Values__ + +|Param |Type |Description | +|:---|:---| :---| +|`err` | _error_ |Standard Error | + +__Example__ + +```go +replicationStr := ` + + + + Disabled + + + string + string + + + + string + + string + string + + ... 
+ + string + + string + string + + + string + string + integer + string + +` +replicationConfig := replication.Config{} +if err := xml.Unmarshal([]byte(replicationStr), &replicationConfig); err != nil { + log.Fatalln(err) +} +cfg.Role := "arn:minio:s3::598361bf-3cec-49a7-b529-ce870a34d759:*" +err = minioClient.SetBucketReplication(context.Background(), "my-bucketname", replicationConfig) +if err != nil { + fmt.Println(err) + return +} +``` + + +### GetBucketReplication(ctx context.Context, bucketName string) (replication.Config, error) +Get current replication config on a bucket. + +__Parameters__ + + +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +|`bucketName` | _string_ |Name of the bucket | + +__Return Values__ + + +|Param |Type |Description | +|:---|:---| :---| +|`replication` | _replication.Config_ |Replication config returned from the server | +|`err` | _error_ |Standard Error | + +__Example__ + +```go +replication, err := minioClient.GetBucketReplication(context.Background(), "my-bucketname", ReplicationReqOptions{}) +if err != nil { + log.Fatalln(err) +} +``` + + +### RemoveBucketReplication(ctx context.Context, bucketname string) error +Removes replication configuration on a bucket. + +__Parameters__ + +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ | Custom context for timeout/cancellation of the call| +|`bucketName` | _string_ |Name of the bucket| + +__Return Values__ + +|Param |Type |Description | +|:---|:---| :---| +|`err` | _error_ |Standard Error | + +__Example__ + +```go +err = minioClient.RemoveBucketReplication(context.Background(), "my-bucketname", ReplicationReqOptions{}) +if err != nil { + fmt.Println(err) + return +} +``` + +## 7. Client custom settings + + +### SetAppInfo(appName, appVersion string) Add custom application details to User-Agent. 
__Parameters__ @@ -1623,8 +2077,3 @@ __Parameters__ | Param | Type | Description | |---|---|---| |`acceleratedEndpoint` | _string_ | Set to new S3 transfer acceleration endpoint.| - - -## 8. Explore Further - -- [Build your own Go Music Player App example](https://docs.minio.io/docs/go-music-player-app) diff --git a/docs/checker.go.template b/docs/checker.go.template deleted file mode 100644 index 2e0f13a53..000000000 --- a/docs/checker.go.template +++ /dev/null @@ -1,21 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/minio/minio-go" -) - -func main() { - // Use a secure connection. - ssl := true - - // Initialize minio client object. - minioClient, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ssl) - if err != nil { - fmt.Println(err) - return - } - - {{.Text}} -} diff --git a/docs/validator.go b/docs/validator.go deleted file mode 100644 index 7d5cbaaab..000000000 --- a/docs/validator.go +++ /dev/null @@ -1,227 +0,0 @@ -// +build ignore - -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package main - -import ( - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strings" - "text/template" - - "github.com/a8m/mark" - "github.com/gernest/wow" - "github.com/gernest/wow/spin" - "github.com/minio/cli" -) - -func init() { - // Validate go binary. 
- if _, err := exec.LookPath("go"); err != nil { - panic(err) - } -} - -var globalFlags = []cli.Flag{ - cli.StringFlag{ - Name: "m", - Value: "API.md", - Usage: "Path to markdown api documentation.", - }, - cli.StringFlag{ - Name: "t", - Value: "checker.go.template", - Usage: "Template used for generating the programs.", - }, - cli.IntFlag{ - Name: "skip", - Value: 2, - Usage: "Skip entries before validating the code.", - }, -} - -func runGofmt(path string) (msg string, err error) { - cmdArgs := []string{"-s", "-w", "-l", path} - cmd := exec.Command("gofmt", cmdArgs...) - stdoutStderr, err := cmd.CombinedOutput() - if err != nil { - return "", err - } - return string(stdoutStderr), nil -} - -func runGoImports(path string) (msg string, err error) { - cmdArgs := []string{"-w", path} - cmd := exec.Command("goimports", cmdArgs...) - stdoutStderr, err := cmd.CombinedOutput() - if err != nil { - return string(stdoutStderr), err - } - return string(stdoutStderr), nil -} - -func runGoBuild(path string) (msg string, err error) { - // Go build the path. - cmdArgs := []string{"build", "-o", "/dev/null", path} - cmd := exec.Command("go", cmdArgs...) - stdoutStderr, err := cmd.CombinedOutput() - if err != nil { - return string(stdoutStderr), err - } - return string(stdoutStderr), nil -} - -func validatorAction(ctx *cli.Context) error { - if !ctx.IsSet("m") || !ctx.IsSet("t") { - return nil - } - docPath := ctx.String("m") - var err error - docPath, err = filepath.Abs(docPath) - if err != nil { - return err - } - data, err := ioutil.ReadFile(docPath) - if err != nil { - return err - } - - templatePath := ctx.String("t") - templatePath, err = filepath.Abs(templatePath) - if err != nil { - return err - } - - skipEntries := ctx.Int("skip") - m := mark.New(string(data), &mark.Options{ - Gfm: true, // Github markdown support is enabled by default. 
- }) - - t, err := template.ParseFiles(templatePath) - if err != nil { - return err - } - - tmpDir, err := ioutil.TempDir("", "md-verifier") - if err != nil { - return err - } - defer os.RemoveAll(tmpDir) - - entryN := 1 - for i := mark.NodeText; i < mark.NodeCheckbox; i++ { - if mark.NodeCode != mark.NodeType(i) { - m.AddRenderFn(mark.NodeType(i), func(node mark.Node) (s string) { - return "" - }) - continue - } - m.AddRenderFn(mark.NodeCode, func(node mark.Node) (s string) { - p, ok := node.(*mark.CodeNode) - if !ok { - return - } - p.Text = strings.NewReplacer("<", "<", ">", ">", """, `"`, "&", "&").Replace(p.Text) - if skipEntries > 0 { - skipEntries-- - return - } - - testFilePath := filepath.Join(tmpDir, "example.go") - w, werr := os.Create(testFilePath) - if werr != nil { - panic(werr) - } - t.Execute(w, p) - w.Sync() - w.Close() - entryN++ - - msg, err := runGofmt(testFilePath) - if err != nil { - fmt.Printf("Failed running gofmt on %s, with (%s):(%s)\n", testFilePath, msg, err) - os.Exit(-1) - } - - msg, err = runGoImports(testFilePath) - if err != nil { - fmt.Printf("Failed running gofmt on %s, with (%s):(%s)\n", testFilePath, msg, err) - os.Exit(-1) - } - - msg, err = runGoBuild(testFilePath) - if err != nil { - fmt.Printf("Failed running gobuild on %s, with (%s):(%s)\n", testFilePath, msg, err) - fmt.Printf("Code with possible issue in %s:\n%s", docPath, p.Text) - fmt.Printf("To test `go build %s`\n", testFilePath) - os.Exit(-1) - } - - // Once successfully built remove the test file - os.Remove(testFilePath) - return - }) - } - - w := wow.New(os.Stdout, spin.Get(spin.Moon), fmt.Sprintf(" Running validation tests in %s", tmpDir)) - - w.Start() - // Render markdown executes our checker on each code blocks. 
- _ = m.Render() - w.PersistWith(spin.Get(spin.Runner), " Successfully finished tests") - w.Stop() - - return nil -} - -func main() { - app := cli.NewApp() - app.Action = validatorAction - app.HideVersion = true - app.HideHelpCommand = true - app.Usage = "Validates code block sections inside API.md" - app.Author = "Minio.io" - app.Flags = globalFlags - // Help template for validator - app.CustomAppHelpTemplate = `NAME: - {{.Name}} - {{.Usage}} - -USAGE: - {{.Name}} {{if .VisibleFlags}}[FLAGS] {{end}}COMMAND{{if .VisibleFlags}} [COMMAND FLAGS | -h]{{end}} [ARGUMENTS...] - -COMMANDS: - {{range .VisibleCommands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}} - {{end}}{{if .VisibleFlags}} -FLAGS: - {{range .VisibleFlags}}{{.}} - {{end}}{{end}} -TEMPLATE: - Validator uses Go's 'text/template' formatting so you need to ensure - your template is formatted correctly, check 'docs/checker.go.template' - -USAGE: - go run docs/validator.go -m docs/API.md -t /tmp/mycode.go.template - -` - app.Run(os.Args) - -} diff --git a/docs/zh_CN/API.md b/docs/zh_CN/API.md index ca80586a4..e799d54da 100644 --- a/docs/zh_CN/API.md +++ b/docs/zh_CN/API.md @@ -1,8 +1,8 @@ -# Minio Go Client API文档 [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) +# MinIO Go Client API文档 [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) -## 初使化Minio Client对象。 +## 初使化MinIO Client对象。 -## Minio +## MinIO ```go package main @@ -10,7 +10,7 @@ package main import ( "fmt" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" ) func main() { @@ -18,7 +18,7 @@ func main() { ssl := true // 初使化minio client对象。 - minioClient, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ssl) + minioClient, err := minio.New("play.min.io", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ssl) if err != nil { fmt.Println(err) return @@ -34,7 +34,7 @@ package main import ( "fmt" - "github.com/minio/minio-go" + 
"github.com/minio/minio-go/v7" ) func main() { @@ -69,7 +69,7 @@ func main() { | | [`FPutObjectWithContext`](#FPutObjectWithContext) | | | | | | [`FGetObjectWithContext`](#FGetObjectWithContext) | | | | ## 1. 构造函数 - + ### New(endpoint, accessKeyID, secretAccessKey string, ssl bool) (*Client, error) 初使化一个新的client对象。 @@ -109,14 +109,28 @@ __参数__ |`bucketName` | _string_ | 存储桶名称 | | `location` | _string_ | 存储桶被创建的region(地区),默认是us-east-1(美国东一区),下面列举的是其它合法的值。注意:如果用的是minio服务的话,resion是在它的配置文件中,(默认是us-east-1)。| | | |us-east-1 | +| | |us-east-2 | | | |us-west-1 | | | |us-west-2 | +| | |ca-central-1 | | | |eu-west-1 | +| | |eu-west-2 | +| | |eu-west-3 | | | | eu-central-1| +| | | eu-north-1| +| | | ap-east-1| +| | | ap-south-1| | | | ap-southeast-1| -| | | ap-northeast-1| | | | ap-southeast-2| +| | | ap-northeast-1| +| | | ap-northeast-2| +| | | ap-northeast-3| +| | | me-south-1| | | | sa-east-1| +| | | us-gov-west-1| +| | | us-gov-east-1| +| | | cn-north-1| +| | | cn-northwest-1| __示例__ @@ -379,7 +393,7 @@ __minio.GetObjectOptions__ |参数 | 类型 | 描述 | |:---|:---|:---| -| `opts.Materials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go) | +| `opts.Materials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go/v7) | __返回值__ @@ -524,7 +538,7 @@ __参数__ |`bucketName` | _string_ |存储桶名称 | |`objectName` | _string_ |对象的名称 | |`filePath` | _string_ |下载后保存的路径| -|`materials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go) | +|`materials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go/v7) | __示例__ @@ -550,7 +564,7 @@ if err != nil { ### PutObject(bucketName, objectName string, reader io.Reader, objectSize int64,opts PutObjectOptions) (n int, err error) -当对象小于64MiB时,直接在一次PUT请求里进行上传。当大于64MiB时,根据文件的实际大小,PutObject会自动地将对象进行拆分成64MiB一块或更大一些进行上传。对象的最大大小是5TB。 
+当对象小于128MiB时,直接在一次PUT请求里进行上传。当大于128MiB时,根据文件的实际大小,PutObject会自动地将对象进行拆分成128MiB一块或更大一些进行上传。对象的最大大小是5TB。 __参数__ @@ -573,7 +587,7 @@ __minio.PutObjectOptions__ | `opts.ContentEncoding` | _string_ | 对象的Content encoding,例如"gzip" | | `opts.ContentDisposition` | _string_ | 对象的Content disposition, "inline" | | `opts.CacheControl` | _string_ | 指定针对请求和响应的缓存机制,例如"max-age=600"| -| `opts.EncryptMaterials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go) | +| `opts.EncryptMaterials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go/v7) | __示例__ @@ -889,7 +903,7 @@ if err != nil { ### FPutObject(bucketName, objectName, filePath, opts PutObjectOptions) (length int64, err error) 将filePath对应的文件内容上传到一个对象中。 -当对象小于64MiB时,FPutObject直接在一次PUT请求里进行上传。当大于64MiB时,根据文件的实际大小,FPutObject会自动地将对象进行拆分成64MiB一块或更大一些进行上传。对象的最大大小是5TB。 +当对象小于128MiB时,FPutObject直接在一次PUT请求里进行上传。当大于128MiB时,根据文件的实际大小,FPutObject会自动地将对象进行拆分成128MiB一块或更大一些进行上传。对象的最大大小是5TB。 __参数__ @@ -1176,7 +1190,7 @@ __参数__ |:---|:---| :---| |`bucketName` | _string_ | 存储桶名称 | |`objectName` | _string_ | 对象的名称 | -|`encryptMaterials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go) | +|`encryptMaterials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go/v7) | __返回值__ @@ -1233,7 +1247,7 @@ __参数__ |`bucketName` | _string_ |存储桶名称 | |`objectName` | _string_ |对象的名称 | |`reader` | _io.Reader_ |任何实现io.Reader的Go类型 | -|`encryptMaterials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go) | +|`encryptMaterials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go/v7) | __示例__ @@ -1295,7 +1309,7 @@ __参数__ |`bucketName` | _string_ |存储桶名称 | |`objectName` | _string_ |对象的名称 | |`filePath` | _string_ |要上传的文件的路径 | -|`encryptMaterials` | _encrypt.Materials_ | 
`encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go) | +|`encryptMaterials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go/v7) | __示例__ @@ -1489,7 +1503,7 @@ fmt.Printf("%s\n", url) ### SetBucketPolicy(bucketname, objectPrefix string, policy policy.BucketPolicy) error 给存储桶或者对象前缀设置访问权限。 -必须引入`github.com/minio/minio-go/pkg/policy`包。 +必须引入`github.com/minio/minio-go/v7/pkg/policy`包。 __参数__ @@ -1530,7 +1544,7 @@ if err != nil { ### GetBucketPolicy(bucketName, objectPrefix string) (policy.BucketPolicy, error) 获取存储桶或者对象前缀的访问权限。 -必须引入`github.com/minio/minio-go/pkg/policy`包。 +必须引入`github.com/minio/minio-go/v7/pkg/policy`包。 __参数__ @@ -1779,8 +1793,3 @@ __参数__ | 参数 | 类型 | 描述 | |---|---|---| |`acceleratedEndpoint` | _string_ | 设置新的S3传输加速endpoint。| - - -## 8. 了解更多 - -- [用Go语言创建属于你的音乐播放器APP示例](https://docs.minio.io/docs/go-music-player-app) diff --git a/examples/minio/listen-notification.go b/examples/minio/listen-notification.go new file mode 100644 index 000000000..6a9c54e17 --- /dev/null +++ b/examples/minio/listen-notification.go @@ -0,0 +1,62 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "context" + "log" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are + // dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + minioClient, err := minio.New("play.min.io", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + + // s3Client.TraceOn(os.Stderr) + + // Listen for bucket notifications on "mybucket" filtered by prefix, suffix and events. + for notificationInfo := range minioClient.ListenNotification(context.Background(), "PREFIX", "SUFFIX", []string{ + "s3:BucketCreated:*", + "s3:BucketRemoved:*", + "s3:ObjectCreated:*", + "s3:ObjectAccessed:*", + "s3:ObjectRemoved:*", + }) { + if notificationInfo.Err != nil { + log.Fatalln(notificationInfo.Err) + } + log.Println(notificationInfo) + } +} diff --git a/examples/minio/listenbucketnotification.go b/examples/minio/listenbucketnotification.go index 4c48510da..92b5c67c4 100644 --- a/examples/minio/listenbucketnotification.go +++ b/examples/minio/listenbucketnotification.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -20,9 +20,11 @@ package main import ( + "context" "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -34,25 +36,22 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - minioClient, err := minio.New("play.minio.io:9000", "YOUR-ACCESS", "YOUR-SECRET", true) + minioClient, err := minio.New("play.min.io", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } // s3Client.TraceOn(os.Stderr) - // Create a done channel to control 'ListenBucketNotification' go routine. - doneCh := make(chan struct{}) - - // Indicate to our routine to exit cleanly upon return. - defer close(doneCh) - // Listen for bucket notifications on "mybucket" filtered by prefix, suffix and events. - for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET", "PREFIX", "SUFFIX", []string{ + for notificationInfo := range minioClient.ListenBucketNotification(context.Background(), "YOUR-BUCKET", "PREFIX", "SUFFIX", []string{ "s3:ObjectCreated:*", "s3:ObjectAccessed:*", "s3:ObjectRemoved:*", - }, doneCh) { + }) { if notificationInfo.Err != nil { log.Fatalln(notificationInfo.Err) } diff --git a/examples/s3/bucketexists.go b/examples/s3/bucketexists.go index 20dea30a3..eafbb5568 100644 --- a/examples/s3/bucketexists.go +++ b/examples/s3/bucketexists.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -20,9 +20,11 @@ package main import ( + "context" "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -34,12 +36,15 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } - found, err := s3Client.BucketExists("my-bucketname") + found, err := s3Client.BucketExists(context.Background(), "my-bucketname") if err != nil { log.Fatalln(err) } diff --git a/examples/s3/composeobject.go b/examples/s3/composeobject.go index 2f76ff053..2b9467a0f 100644 --- a/examples/s3/composeobject.go +++ b/examples/s3/composeobject.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,12 @@ package main import ( + "context" "log" - minio "github.com/minio/minio-go" + minio "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/encrypt" ) func main() { @@ -34,7 +37,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } @@ -44,35 +50,48 @@ func main() { // Prepare source decryption key (here we assume same key to // decrypt all source objects.) - decKey := minio.NewSSEInfo([]byte{1, 2, 3}, "") + decKey, _ := encrypt.NewSSEC([]byte{1, 2, 3}) // Source objects to concatenate. We also specify decryption // key for each - src1 := minio.NewSourceInfo("bucket1", "object1", &decKey) - src1.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a") + src1 := minio.CopySrcOptions{ + Bucket: "bucket1", + Object: "object1", + Encryption: decKey, + MatchETag: "31624deb84149d2f8ef9c385918b653a", + } - src2 := minio.NewSourceInfo("bucket2", "object2", &decKey) - src2.SetMatchETagCond("f8ef9c385918b653a31624deb84149d2") + src2 := minio.CopySrcOptions{ + Bucket: "bucket2", + Object: "object2", + Encryption: decKey, + MatchETag: "f8ef9c385918b653a31624deb84149d2", + } - src3 := minio.NewSourceInfo("bucket3", "object3", &decKey) - src3.SetMatchETagCond("5918b653a31624deb84149d2f8ef9c38") + src3 := minio.CopySrcOptions{ + Bucket: "bucket3", + Object: "object3", + Encryption: decKey, + MatchETag: "5918b653a31624deb84149d2f8ef9c38", + } // Create slice of sources. srcs := []minio.SourceInfo{src1, src2, src3} // Prepare destination encryption key - encKey := minio.NewSSEInfo([]byte{8, 9, 0}, "") + encKey, _ := encrypt.NewSSEC([]byte{8, 9, 0}) // Create destination info - dst, err := minio.NewDestinationInfo("bucket", "object", &encKey, nil) - if err != nil { - log.Fatalln(err) + dst := minio.CopyDestOptions{ + Bucket: "bucket", + Object: "object", + Encryption: encKey, } - err = s3Client.ComposeObject(dst, srcs) + uploadInfo, err := s3Client.ComposeObject(context.Background(), dst, srcs...) 
if err != nil { log.Fatalln(err) } - log.Println("Composed object successfully.") + log.Println("Composed object successfully:", uploadInfo) } diff --git a/examples/s3/copyobject-with-new-tags.go b/examples/s3/copyobject-with-new-tags.go new file mode 100644 index 000000000..60c2ac0b1 --- /dev/null +++ b/examples/s3/copyobject-with-new-tags.go @@ -0,0 +1,78 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "log" + "time" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and + // my-objectname are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + + // Enable trace. 
+ // s3Client.TraceOn(os.Stderr) + + // Source object + src := minio.CopySrcOptions{ + Bucket: "my-sourcebucketname", + Object: "my-sourceobjectname", + // All following conditions are allowed and can be combined together. + // Set modified condition, copy object modified since 2014 April. + MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + } + + // Destination object + dst := minio.CopyDestOptions{ + Bucket: "my-bucketname", + Object: "my-objectname", + ReplaceTags: true, + UserTags: map[string]string{ + "Tag1": "Value1", + "Tag2": "Value2", + }, + } + + // Initiate copy object. + ui, err := s3Client.CopyObject(context.Background(), dst, src) + if err != nil { + log.Fatalln(err) + } + + log.Printf("Copied %s, successfully to %s - UploadInfo %v\n", dst, src, ui) +} diff --git a/examples/s3/copyobject.go b/examples/s3/copyobject.go index a7c3eca45..989ac2ee0 100644 --- a/examples/s3/copyobject.go +++ b/examples/s3/copyobject.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,10 +20,12 @@ package main import ( + "context" "log" "time" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -35,7 +37,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } @@ -44,32 +49,31 @@ func main() { // s3Client.TraceOn(os.Stderr) // Source object - src := minio.NewSourceInfo("my-sourcebucketname", "my-sourceobjectname", nil) - - // All following conditions are allowed and can be combined together. - - // Set modified condition, copy object modified since 2014 April. - src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) - - // Set unmodified condition, copy object unmodified since 2014 April. - // src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) - - // Set matching ETag condition, copy object which matches the following ETag. - // src.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a") - - // Set matching ETag except condition, copy object which does not match the following ETag. - // src.SetMatchETagExceptCond("31624deb84149d2f8ef9c385918b653a") + src := minio.CopySrcOptions{ + Bucket: "my-sourcebucketname", + Object: "my-sourceobjectname", + // All following conditions are allowed and can be combined together. + // Set modified condition, copy object modified since 2014 April. + MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + // Set unmodified condition, copy object unmodified since 2014 April. + // MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + // Set matching ETag condition, copy object which matches the following ETag. + // MatchETag: "31624deb84149d2f8ef9c385918b653a", + // Set matching ETag copy object which does not match the following ETag. 
+		// NoMatchETag: "31624deb84149d2f8ef9c385918b653a",
+	}
 
 	// Destination object
-	dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
-	if err != nil {
-		log.Fatalln(err)
+	dst := minio.CopyDestOptions{
+		Bucket: "my-bucketname",
+		Object: "my-objectname",
 	}
 
 	// Initiate copy object.
-	err = s3Client.CopyObject(dst, src)
+	ui, err := s3Client.CopyObject(context.Background(), dst, src)
 	if err != nil {
 		log.Fatalln(err)
 	}
-	log.Println("Copied source object /my-sourcebucketname/my-sourceobjectname to destination /my-bucketname/my-objectname Successfully.")
+
+	log.Printf("Copied %s, successfully to %s - UploadInfo %v\n", dst, src, ui)
 }
diff --git a/examples/s3/disableversioning.go b/examples/s3/disableversioning.go
new file mode 100644
index 000000000..a0854d45d
--- /dev/null
+++ b/examples/s3/disableversioning.go
@@ -0,0 +1,52 @@
+// +build ignore
+
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2019 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+	"context"
+	"log"
+
+	minio "github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+func main() {
+	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+	// dummy values, please replace them with original values.
+
+	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + + err = s3Client.DisableVersioning(context.Background(), "my-bucketname") + if err != nil { + log.Fatalln(err) + } + log.Println("Disabled") +} diff --git a/examples/s3/enableversioning.go b/examples/s3/enableversioning.go new file mode 100644 index 000000000..6876d444f --- /dev/null +++ b/examples/s3/enableversioning.go @@ -0,0 +1,52 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "log" + + minio "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are + // dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. 
API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + + err = s3Client.EnableVersioning(context.Background(), "my-bucketname") + if err != nil { + log.Fatalln(err) + } + log.Println("Enabled") +} diff --git a/examples/s3/fgetobject.go b/examples/s3/fgetobject.go index 819a34f91..cbd17f798 100644 --- a/examples/s3/fgetobject.go +++ b/examples/s3/fgetobject.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,11 @@ package main import ( + "context" "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -34,12 +36,15 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } - if err := s3Client.FGetObject("my-bucketname", "my-objectname", "my-filename.csv", minio.GetObjectOptions{}); err != nil { + if err := s3Client.FGetObject(context.Background(), "my-bucketname", "my-objectname", "my-filename.csv", minio.GetObjectOptions{}); err != nil { log.Fatalln(err) } log.Println("Successfully saved my-filename.csv") diff --git a/examples/s3/fputencrypted-object.go b/examples/s3/fputencrypted-object.go index 5da9f9d71..9bea03719 100644 --- a/examples/s3/fputencrypted-object.go +++ b/examples/s3/fputencrypted-object.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,11 +20,12 @@ package main import ( + "context" "log" - "github.com/minio/minio-go/pkg/encrypt" - - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/encrypt" ) func main() { @@ -36,7 +37,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } @@ -50,10 +54,10 @@ func main() { encryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketname+objectName)) // Encrypt file content and upload to the server - n, err := s3Client.FPutObject(bucketname, objectName, filePath, minio.PutObjectOptions{ServerSideEncryption: encryption}) + uploadedInfo, err := s3Client.FPutObject(context.Background(), bucketname, objectName, filePath, minio.PutObjectOptions{ServerSideEncryption: encryption}) if err != nil { log.Fatalln(err) } - log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.") + log.Println("Uploaded", "my-objectname:", uploadedInfo) } diff --git a/examples/s3/fputobject.go b/examples/s3/fputobject.go index 34d876804..bb4ffc8f0 100644 --- a/examples/s3/fputobject.go +++ b/examples/s3/fputobject.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,11 @@ package main import ( + "context" "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -34,12 +36,15 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } - if _, err := s3Client.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{ + if _, err := s3Client.FPutObject(context.Background(), "my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{ ContentType: "application/csv", }); err != nil { log.Fatalln(err) diff --git a/examples/s3/get-encrypted-object.go b/examples/s3/get-encrypted-object.go index 62a06d59f..fdfe5525e 100644 --- a/examples/s3/get-encrypted-object.go +++ b/examples/s3/get-encrypted-object.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,12 +20,14 @@ package main import ( + "context" "io" "log" "os" - "github.com/minio/minio-go" - "github.com/minio/minio-go/pkg/encrypt" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/encrypt" ) func main() { @@ -37,7 +39,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } @@ -50,7 +55,7 @@ func main() { encryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketname+objectName)) // Get the encrypted object - reader, err := s3Client.GetObject(bucketname, objectName, minio.GetObjectOptions{ServerSideEncryption: encryption}) + reader, err := s3Client.GetObject(context.Background(), bucketname, objectName, minio.GetObjectOptions{ServerSideEncryption: encryption}) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/getbucketencryption.go b/examples/s3/getbucketencryption.go new file mode 100644 index 000000000..0d84f38fb --- /dev/null +++ b/examples/s3/getbucketencryption.go @@ -0,0 +1,57 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "fmt" + "log" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are + // dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. 
Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + + // s3Client.TraceOn(os.Stderr) + + // Get default encryption configuration set on an S3 bucket, + // and print out the encryption configuration. + encryptionConfig, err := s3Client.GetBucketEncryption(context.Background(), "my-bucketname") + if err != nil { + log.Fatalln(err) + } + fmt.Printf("%+v\n", encryptionConfig) +} diff --git a/examples/s3/getbucketlifecycle.go b/examples/s3/getbucketlifecycle.go index 2e3ef419e..c090fb927 100644 --- a/examples/s3/getbucketlifecycle.go +++ b/examples/s3/getbucketlifecycle.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,12 +20,14 @@ package main import ( + "context" "io" "log" "os" "strings" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -37,7 +39,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } @@ -45,7 +50,7 @@ func main() { // s3Client.TraceOn(os.Stderr) // Get bucket lifecycle from S3 - lifecycle, err := s3Client.GetBucketLifecycle("my-bucketname") + lifecycle, err := s3Client.GetBucketLifecycle(context.Background(), "my-bucketname") if err != nil { log.Fatalln(err) } diff --git a/examples/s3/getbucketnotification.go b/examples/s3/getbucketnotification.go index 19349baaf..efafac865 100644 --- a/examples/s3/getbucketnotification.go +++ b/examples/s3/getbucketnotification.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,11 @@ package main import ( + "context" "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -34,14 +36,17 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } // s3Client.TraceOn(os.Stderr) - notifications, err := s3Client.GetBucketNotification("my-bucketname") + notifications, err := s3Client.GetBucketNotification(context.Background(), "my-bucketname") if err != nil { log.Fatalln(err) } diff --git a/examples/s3/getbucketpolicy.go b/examples/s3/getbucketpolicy.go index e5b594057..1cf2e2486 100644 --- a/examples/s3/getbucketpolicy.go +++ b/examples/s3/getbucketpolicy.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,12 +20,14 @@ package main import ( + "context" "io" "log" "os" "strings" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -37,14 +39,17 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } // s3Client.TraceOn(os.Stderr) - policy, err := s3Client.GetBucketPolicy("my-bucketname") + policy, err := s3Client.GetBucketPolicy(context.Background(), "my-bucketname") if err != nil { log.Fatalln(err) } diff --git a/examples/s3/getbucketreplication.go b/examples/s3/getbucketreplication.go new file mode 100644 index 000000000..aed17442d --- /dev/null +++ b/examples/s3/getbucketreplication.go @@ -0,0 +1,66 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "encoding/json" + "log" + "os" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are + // dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. 
API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + // s3Client.TraceOn(os.Stderr) + // Get bucket replication configuration from S3 + replicationCfg, err := s3Client.GetBucketReplication(context.Background(), "my-bucketname") + if err != nil { + log.Fatalln(err) + } + // Create replication config file + localReplicationCfgFile, err := os.Create("replication.xml") + if err != nil { + log.Fatalln(err) + } + defer localReplicationCfgFile.Close() + + replBytes, err := json.Marshal(replicationCfg) + if err != nil { + log.Fatalln(err) + } + localReplicationCfgFile.Write(replBytes) +} diff --git a/examples/s3/getbuckettagging.go b/examples/s3/getbuckettagging.go new file mode 100644 index 000000000..9d2067e66 --- /dev/null +++ b/examples/s3/getbuckettagging.go @@ -0,0 +1,53 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "context" + "fmt" + "log" + + minio "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and + // my-objectname are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + + tags, err := s3Client.GetBucketTagging(context.Background(), "my-bucketname") + if err != nil { + log.Fatalln(err) + } + fmt.Printf("Fetched Object Tags: %s", tags) +} diff --git a/examples/s3/getbucketversioning.go b/examples/s3/getbucketversioning.go new file mode 100644 index 000000000..9b55d892d --- /dev/null +++ b/examples/s3/getbucketversioning.go @@ -0,0 +1,57 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "context" + "fmt" + "log" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are + // dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + + // s3Client.TraceOn(os.Stderr) + + // Get versioning configuration set on an S3 bucket, + // and print out the versioning configuration. + versioningConfig, err := s3Client.GetBucketVersioning(context.Background(), "my-bucketname") + if err != nil { + log.Fatalln(err) + } + fmt.Printf("%+v\n", versioningConfig) +} diff --git a/examples/s3/getobject-client-encryption.go b/examples/s3/getobject-client-encryption.go index 6b06073c9..7d75baaa8 100644 --- a/examples/s3/getobject-client-encryption.go +++ b/examples/s3/getobject-client-encryption.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2018 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -20,11 +20,13 @@ package main import ( + "context" "log" "os" "path" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/sio" "golang.org/x/crypto/argon2" ) @@ -38,12 +40,15 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } - obj, err := s3Client.GetObject("my-bucketname", "my-objectname", minio.GetObjectOptions{}) + obj, err := s3Client.GetObject(context.Background(), "my-bucketname", "my-objectname", minio.GetObjectOptions{}) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/getobject.go b/examples/s3/getobject.go index e17ef8172..7fec93f12 100644 --- a/examples/s3/getobject.go +++ b/examples/s3/getobject.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,11 +20,13 @@ package main import ( + "context" "io" "log" "os" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -36,12 +38,15 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } - reader, err := s3Client.GetObject("my-bucketname", "my-objectname", minio.GetObjectOptions{}) + reader, err := s3Client.GetObject(context.Background(), "my-bucketname", "my-objectname", minio.GetObjectOptions{}) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/getobjectacl.go b/examples/s3/getobjectacl.go index f2bbd95d5..0581ce1e8 100644 --- a/examples/s3/getobjectacl.go +++ b/examples/s3/getobjectacl.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2018-2019 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018-2019 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,10 +20,12 @@ package main import ( + "context" "fmt" "log" - minio "github.com/minio/minio-go" + minio "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -35,14 +37,30 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + + objectInfo, err := s3Client.GetObjectACL(context.Background(), "my-bucketname", "my-objectname") if err != nil { log.Fatalln(err) } - objectInfo, err := s3Client.GetObjectACL("my-bucketname", "my-objectname") - if err != nil { - log.Fatalln(err) + //print object owner information + fmt.Printf(`Object owner: +Display name: %q +ID: %q +`, objectInfo.Owner.DisplayName, objectInfo.Owner.ID) + + //print object grant information + for _, g := range objectInfo.Grant { + fmt.Printf(`Object grant: + - Display name: %q + - ID: %q + - URI: %q + - Permission: %q +`, g.Grantee.DisplayName, g.Grantee.ID, g.Grantee.URI, g.Permission) } //print all header values (acl, metadata, standard header value...) diff --git a/examples/s3/getobjectlegalhold.go b/examples/s3/getobjectlegalhold.go new file mode 100644 index 000000000..018b96f9e --- /dev/null +++ b/examples/s3/getobjectlegalhold.go @@ -0,0 +1,54 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "context" + "fmt" + "log" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and + // my-testfile are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + opts := minio.GetObjectLegalHoldOptions{} + lh, err := s3Client.GetObjectLegalHold(context.Background(), "my-bucket", "my-object", opts) + if err != nil { + log.Fatalln(err) + } + fmt.Printf("Legal Hold on object is %s", lh) + log.Println("Get object legal-hold on my-object successfully.") +} diff --git a/examples/s3/getobjectlockconfig.go b/examples/s3/getobjectlockconfig.go new file mode 100644 index 000000000..f00ec0ec7 --- /dev/null +++ b/examples/s3/getobjectlockconfig.go @@ -0,0 +1,62 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "fmt" + "log" + + minio "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are + // dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + + // s3Client.TraceOn(os.Stderr) + + // Get object lock configuration. + enabled, mode, validity, unit, err := s3Client.GetObjectLockConfig(context.Background(), "tbucket13a") + if err != nil { + log.Fatalln(err) + } + fmt.Printf("object lock is %v for bucket 'my-bucketname'\n", enabled) + + if mode != nil { + fmt.Printf("%v mode is enabled for %v %v for bucket 'my-bucketname'\n", *mode, *validity, *unit) + } else { + fmt.Println("No mode is enabled for bucket 'my-bucketname'") + } +} diff --git a/examples/s3/fgetobject-context.go b/examples/s3/getobjectretention.go similarity index 62% rename from examples/s3/fgetobject-context.go rename to examples/s3/getobjectretention.go index 6004baa14..8f19c713d 100644 --- a/examples/s3/fgetobject-context.go +++ b/examples/s3/getobjectretention.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019 MinIO, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,35 +20,32 @@ package main import ( - "log" - "time" - "context" + "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname - // and my-filename.csv are dummy values, please replace them with original values. + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and + // my-testfile are dummy values, please replace them with original values. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. // This boolean value is the last argument for New(). // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancel() - - if err := s3Client.FGetObjectWithContext(ctx, "my-bucketname", "my-objectname", "my-filename.csv", minio.GetObjectOptions{}); err != nil { + m, t, err := s3Client.GetObjectRetention(context.Background(), "my-bucket", "my-object", "") + if err != nil { log.Fatalln(err) } - log.Println("Successfully saved my-filename.csv") - + log.Println("Get object retention successful, Mode: ", m.String(), " Retainuntil Date ", t.String()) } diff --git a/examples/s3/getobjecttagging.go b/examples/s3/getobjecttagging.go new file mode 100644 index 000000000..09674f399 --- /dev/null +++ b/examples/s3/getobjecttagging.go @@ -0,0 +1,54 @@ +// 
+build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "fmt" + "log" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and + // my-objectname are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. 
+ s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + + tags, err := s3Client.GetObjectTagging(context.Background(), "my-bucketname", "my-objectname") + if err != nil { + log.Fatalln(err) + } + + fmt.Printf("Fetched Object Tags: %s", tags) +} diff --git a/examples/s3/listbuckets.go b/examples/s3/listbuckets.go index 5eae587b4..348366218 100644 --- a/examples/s3/listbuckets.go +++ b/examples/s3/listbuckets.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,11 @@ package main import ( + "context" "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -34,12 +36,15 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } - buckets, err := s3Client.ListBuckets() + buckets, err := s3Client.ListBuckets(context.Background()) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/listincompleteuploads.go b/examples/s3/listincompleteuploads.go index a5a79b603..adcc10561 100644 --- a/examples/s3/listincompleteuploads.go +++ b/examples/s3/listincompleteuploads.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,10 +20,12 @@ package main import ( + "context" "fmt" "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -35,19 +37,16 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } - // Create a done channel to control 'ListObjects' go routine. - doneCh := make(chan struct{}) - - // Indicate to our routine to exit cleanly upon return. - defer close(doneCh) - // List all multipart uploads from a bucket-name with a matching prefix. 
- for multipartObject := range s3Client.ListIncompleteUploads("my-bucketname", "my-prefixname", true, doneCh) { + for multipartObject := range s3Client.ListIncompleteUploads(context.Background(), "my-bucketname", "my-prefixname", true) { if multipartObject.Err != nil { fmt.Println(multipartObject.Err) return diff --git a/examples/s3/listobjects-N.go b/examples/s3/listobjects-N.go index 55bceb470..0aaa9852b 100644 --- a/examples/s3/listobjects-N.go +++ b/examples/s3/listobjects-N.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,11 @@ package main import ( + "context" "fmt" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -34,7 +36,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { fmt.Println(err) return @@ -49,7 +54,7 @@ func main() { defer close(doneCh) i := 1 - for object := range s3Client.ListObjects(bucket, prefix, recursive, doneCh) { + for object := range s3Client.ListObjects(context.Background(), bucket, prefix, recursive, doneCh) { if object.Err != nil { return nil, object.Err } diff --git a/examples/s3/listobjects.go b/examples/s3/listobjects.go index 1da2e3faa..3dd0bf36c 100644 --- a/examples/s3/listobjects.go +++ b/examples/s3/listobjects.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,11 @@ package main import ( + "context" "fmt" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -34,7 +36,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { fmt.Println(err) return @@ -47,7 +52,7 @@ func main() { defer close(doneCh) // List all objects from a bucket-name with a matching prefix. 
- for object := range s3Client.ListObjects("my-bucketname", "my-prefixname", true, doneCh) { + for object := range s3Client.ListObjects(context.Background(), "my-bucketname", "my-prefixname", true, doneCh) { if object.Err != nil { fmt.Println(object.Err) return diff --git a/examples/s3/listobjectsV2.go b/examples/s3/listobjectsV2.go index 190aec36b..99dfecbeb 100644 --- a/examples/s3/listobjectsV2.go +++ b/examples/s3/listobjectsV2.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,11 @@ package main import ( + "context" "fmt" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -34,7 +36,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { fmt.Println(err) return @@ -47,7 +52,7 @@ func main() { defer close(doneCh) // List all objects from a bucket-name with a matching prefix. 
- for object := range s3Client.ListObjectsV2("my-bucketname", "my-prefixname", true, doneCh) { + for object := range s3Client.ListObjectsV2(context.Background(), "my-bucketname", "my-prefixname", true, doneCh) { if object.Err != nil { fmt.Println(object.Err) return diff --git a/examples/s3/listobjectsV2WithMetadata.go b/examples/s3/listobjectsV2WithMetadata.go new file mode 100644 index 000000000..34294b171 --- /dev/null +++ b/examples/s3/listobjectsV2WithMetadata.go @@ -0,0 +1,63 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "fmt" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname + // are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. 
+ s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + fmt.Println(err) + return + } + + // Create a done channel to control 'ListObjects' go routine. + doneCh := make(chan struct{}) + + // Indicate to our routine to exit cleanly upon return. + defer close(doneCh) + + // List all objects from a bucket-name with a matching prefix. + for object := range s3Client.ListObjectsV2WithMetadata(context.Background(), "my-bucketname", "my-prefixname", true, doneCh) { + if object.Err != nil { + fmt.Println(object.Err) + return + } + fmt.Println(object) + } + return +} diff --git a/examples/s3/listobjectversions.go b/examples/s3/listobjectversions.go new file mode 100644 index 000000000..518544466 --- /dev/null +++ b/examples/s3/listobjectversions.go @@ -0,0 +1,63 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "fmt" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname + // are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. 
+ // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + fmt.Println(err) + return + } + + // Create a done channel to control 'ListObjects' go routine. + doneCh := make(chan struct{}) + + // Indicate to our routine to exit cleanly upon return. + defer close(doneCh) + + // List all objects from a bucket-name with a matching prefix. + for objectVersion := range s3Client.ListObjectVersions(context.Background(), "my-bucketname", "my-prefixname", true, doneCh) { + if objectVersion.Err != nil { + fmt.Println(objectVersion.Err) + return + } + fmt.Println(objectVersion) + } + return +} diff --git a/examples/s3/makebucket.go b/examples/s3/makebucket.go index 419c96cf2..b136af1e0 100644 --- a/examples/s3/makebucket.go +++ b/examples/s3/makebucket.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,11 @@ package main import ( + "context" "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -34,12 +36,15 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } - err = s3Client.MakeBucket("my-bucketname", "us-east-1") + err = s3Client.MakeBucket(context.Background(), "my-bucketname", "us-east-1") if err != nil { log.Fatalln(err) } diff --git a/examples/s3/presignedgetobject.go b/examples/s3/presignedgetobject.go index fd7fb9e8d..731727ff5 100644 --- a/examples/s3/presignedgetobject.go +++ b/examples/s3/presignedgetobject.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,11 +20,13 @@ package main import ( + "context" "log" "net/url" "time" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -36,7 +38,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } @@ -46,7 +51,7 @@ func main() { reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"") // Gernerate presigned get object url. 
- presignedURL, err := s3Client.PresignedGetObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second, reqParams) + presignedURL, err := s3Client.PresignedGetObject(context.Background(), "my-bucketname", "my-objectname", time.Duration(1000)*time.Second, reqParams) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/presignedheadobject.go b/examples/s3/presignedheadobject.go index 8dbc0a4b7..2d9afccd1 100644 --- a/examples/s3/presignedheadobject.go +++ b/examples/s3/presignedheadobject.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,11 +20,13 @@ package main import ( + "context" "log" "net/url" "time" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -36,7 +38,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } @@ -46,7 +51,7 @@ func main() { reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"") // Gernerate presigned get object url. 
- presignedURL, err := s3Client.PresignedHeadObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second, reqParams) + presignedURL, err := s3Client.PresignedHeadObject(context.Background(), "my-bucketname", "my-objectname", time.Duration(1000)*time.Second, reqParams) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/presignedpostpolicy.go b/examples/s3/presignedpostpolicy.go index 205ac95a3..e5fe1523c 100644 --- a/examples/s3/presignedpostpolicy.go +++ b/examples/s3/presignedpostpolicy.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,11 +20,13 @@ package main import ( + "context" "fmt" "log" "time" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -36,7 +38,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } @@ -47,7 +52,7 @@ func main() { // Expires in 10 days. policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // Returns form data for POST form request. 
- url, formData, err := s3Client.PresignedPostPolicy(policy) + url, formData, err := s3Client.PresignedPostPolicy(context.Background(), policy) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/presignedputobject.go b/examples/s3/presignedputobject.go index b2f8b4f82..fed3ce4b0 100644 --- a/examples/s3/presignedputobject.go +++ b/examples/s3/presignedputobject.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,10 +20,12 @@ package main import ( + "context" "log" "time" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -35,12 +37,15 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } - presignedURL, err := s3Client.PresignedPutObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second) + presignedURL, err := s3Client.PresignedPutObject(context.Background(), "my-bucketname", "my-objectname", time.Duration(1000)*time.Second) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/put-encrypted-object.go b/examples/s3/put-encrypted-object.go index 48b93671e..e0c42f66b 100644 --- a/examples/s3/put-encrypted-object.go +++ b/examples/s3/put-encrypted-object.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,11 +20,13 @@ package main import ( + "context" "log" "os" - "github.com/minio/minio-go" - "github.com/minio/minio-go/pkg/encrypt" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/encrypt" ) func main() { @@ -36,7 +38,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } @@ -64,7 +69,7 @@ func main() { encryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketname+objectName)) // Encrypt file content and upload to the server - n, err := s3Client.PutObject(bucketname, objectName, file, fstat.Size(), minio.PutObjectOptions{ServerSideEncryption: encryption}) + n, err := s3Client.PutObject(context.Background(), bucketname, objectName, file, fstat.Size(), minio.PutObjectOptions{ServerSideEncryption: encryption}) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/putobject-client-encryption.go b/examples/s3/putobject-client-encryption.go index 77d83b400..04d6055b3 100644 --- a/examples/s3/putobject-client-encryption.go +++ b/examples/s3/putobject-client-encryption.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2018 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,11 +20,13 @@ package main import ( + "context" "log" "os" "path" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/sio" "golang.org/x/crypto/argon2" ) @@ -38,7 +40,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } @@ -67,7 +72,7 @@ func main() { if err != nil { log.Fatalln(err) } - _, err = s3Client.PutObject("my-bucketname", "my-objectname", encrypted, int64(encSize), minio.PutObjectOptions{}) + _, err = s3Client.PutObject(context.Background(), "my-bucketname", "my-objectname", encrypted, int64(encSize), minio.PutObjectOptions{}) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/putobject-getobject-sse.go b/examples/s3/putobject-getobject-sse.go index 4e459b5d7..2764a2034 100644 --- a/examples/s3/putobject-getobject-sse.go +++ b/examples/s3/putobject-getobject-sse.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,12 +21,13 @@ package main import ( "bytes" + "context" "io/ioutil" "log" - "github.com/minio/minio-go/pkg/encrypt" - - minio "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/encrypt" ) func main() { @@ -35,7 +36,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- minioClient, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + minioClient, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } @@ -45,14 +49,14 @@ func main() { object := []byte("Hello again") encryption := encrypt.DefaultPBKDF([]byte("my secret password"), []byte(bucketName+objectName)) - _, err = minioClient.PutObject(bucketName, objectName, bytes.NewReader(object), int64(len(object)), minio.PutObjectOptions{ + _, err = minioClient.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(object), int64(len(object)), minio.PutObjectOptions{ ServerSideEncryption: encryption, }) if err != nil { log.Fatalln(err) } - reader, err := minioClient.GetObject(bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: encryption}) + reader, err := minioClient.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: encryption}) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/putobject-progress.go b/examples/s3/putobject-progress.go index 0e92dd65e..82b3228b6 100644 --- a/examples/s3/putobject-progress.go +++ b/examples/s3/putobject-progress.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,10 +20,12 @@ package main import ( + "context" "log" "github.com/cheggaaa/pb" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -35,12 +37,15 @@ func main() { // New returns an Amazon S3 compatible client object. 
API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } - reader, err := s3Client.GetObject("my-bucketname", "my-objectname", minio.GetObjectOptions{}) + reader, err := s3Client.GetObject(context.Background(), "my-bucketname", "my-objectname", minio.GetObjectOptions{}) if err != nil { log.Fatalln(err) } @@ -55,7 +60,7 @@ func main() { // the Reads inside. progress := pb.New64(objectInfo.Size) progress.Start() - n, err := s3Client.PutObject("my-bucketname", "my-objectname-progress", reader, objectInfo.Size, minio.PutObjectOptions{ContentType: "application/octet-stream", Progress: progress}) + n, err := s3Client.PutObject(context.Background(), "my-bucketname", "my-objectname-progress", reader, objectInfo.Size, minio.PutObjectOptions{ContentType: "application/octet-stream", Progress: progress}) if err != nil { log.Fatalln(err) diff --git a/examples/s3/putobject-s3-accelerate.go b/examples/s3/putobject-s3-accelerate.go index 06345cd87..f79564217 100644 --- a/examples/s3/putobject-s3-accelerate.go +++ b/examples/s3/putobject-s3-accelerate.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -20,10 +20,12 @@ package main import ( + "context" "log" "os" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -35,7 +37,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } @@ -54,7 +59,7 @@ func main() { log.Fatalln(err) } - n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) + n, err := s3Client.PutObject(context.Background(), "my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/putobject-streaming.go b/examples/s3/putobject-streaming.go index 85b78dd45..3ccdbcaca 100644 --- a/examples/s3/putobject-streaming.go +++ b/examples/s3/putobject-streaming.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,10 +20,12 @@ package main import ( + "context" "log" "os" - minio "github.com/minio/minio-go" + minio "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -35,7 +37,10 @@ func main() { // New returns an Amazon S3 compatible client object. 
API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } @@ -46,7 +51,7 @@ func main() { } defer object.Close() - n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, -1, minio.PutObjectOptions{}) + n, err := s3Client.PutObject(context.Background(), "my-bucketname", "my-objectname", object, -1, minio.PutObjectOptions{}) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/putobject-context.go b/examples/s3/putobject-with-tags.go similarity index 70% rename from examples/s3/putobject-context.go rename to examples/s3/putobject-with-tags.go index acc923f7e..4b8b9b9b2 100644 --- a/examples/s3/putobject-context.go +++ b/examples/s3/putobject-with-tags.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,13 +20,12 @@ package main import ( + "context" "log" "os" - "time" - - "context" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -38,29 +37,28 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancel() - object, err := os.Open("my-testfile") if err != nil { log.Fatalln(err) } defer object.Close() - objectStat, err := object.Stat() if err != nil { log.Fatalln(err) } - - n, err := s3Client.PutObjectWithContext(ctx, "my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{ - ContentType: "application/octet-stream", - }) + tags := map[string]string{ + "Tag1": "Value1", + "Tag2": "Value2", + } + n, err := s3Client.PutObject(context.Background(), "my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{ContentType: "application/octet-stream", UserTags: tags}) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/putobject.go b/examples/s3/putobject.go index b9e4ff16c..7f6071a38 100644 --- a/examples/s3/putobject.go +++ b/examples/s3/putobject.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,10 +20,12 @@ package main import ( + "context" "log" "os" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -35,7 +37,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } @@ -50,7 +55,7 @@ func main() { log.Fatalln(err) } - n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) + n, err := s3Client.PutObject(context.Background(), "my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/fputobject-context.go b/examples/s3/putobjectlegalhold.go similarity index 60% rename from examples/s3/fputobject-context.go rename to examples/s3/putobjectlegalhold.go index d7c941c2b..453628a06 100644 --- a/examples/s3/fputobject-context.go +++ b/examples/s3/putobjectlegalhold.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,34 +20,36 @@ package main import ( - "log" - "time" - "context" + "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname - // and my-filename.csv are dummy values, please replace them with original values. + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and + // my-testfile are dummy values, please replace them with original values. // Requests are always secure (HTTPS) by default. 
Set secure=false to enable insecure (HTTP) access. // This boolean value is the last argument for New(). // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancel() - - if _, err := s3Client.FPutObjectWithContext(ctx, "my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{ContentType: "application/csv"}); err != nil { + s := minio.LegalHoldEnabled + opts := minio.PutObjectLegalHoldOptions{ + Status: &s, + } + err = s3Client.PutObjectLegalHold(context.Background(), "my-bucket", "my-object", opts) + if err != nil { log.Fatalln(err) } - log.Println("Successfully uploaded my-filename.csv") + log.Println("Set object legal-hold on my-object successfully.") } diff --git a/examples/s3/getobject-context.go b/examples/s3/putobjectretention.go similarity index 58% rename from examples/s3/getobject-context.go rename to examples/s3/putobjectretention.go index c7d41707a..a519f44a2 100644 --- a/examples/s3/getobject-context.go +++ b/examples/s3/putobjectretention.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -20,14 +20,12 @@ package main import ( - "io" + "context" "log" - "os" "time" - "context" - - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -39,35 +37,23 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", true) - if err != nil { - log.Fatalln(err) - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancel() - - opts := minio.GetObjectOptions{} - opts.SetModified(time.Now().Round(10 * time.Minute)) // get object if was modified within the last 10 minutes - reader, err := s3Client.GetObjectWithContext(ctx, "my-bucketname", "my-objectname", opts) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } - defer reader.Close() - - localFile, err := os.Create("my-testfile") - if err != nil { - log.Fatalln(err) + t := time.Date(2020, time.November, 18, 14, 0, 0, 0, time.UTC) + m := minio.RetentionMode(minio.Governance) + opts := minio.PutObjectRetentionOptions{ + GovernanceBypass: true, + RetainUntilDate: &t, + Mode: &m, } - defer localFile.Close() - - stat, err := reader.Stat() + err = s3Client.PutObjectRetention(context.Background(), "my-bucket", "my-object", opts) if err != nil { log.Fatalln(err) } - - if _, err := io.CopyN(localFile, reader, stat.Size); err != nil { - log.Fatalln(err) - } + log.Println("Set object retention on my-object successfully.") } diff --git a/examples/s3/putobjecttagging.go b/examples/s3/putobjecttagging.go new file mode 100644 index 000000000..8ba1c6fb1 --- /dev/null +++ b/examples/s3/putobjecttagging.go @@ -0,0 +1,59 @@ +// +build ignore + +/* + * MinIO Go Library for 
Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "log" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/tags" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and + // my-objectname are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. 
+ s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + tagMap := map[string]string{ + "Tag1": "Value1", + "Tag2": "Value2", + } + t, err := tags.MapToObjectTags(tagMap) + if err != nil { + log.Fatalln(err) + } + err = s3Client.PutObjectTagging(context.Background(), "my-bucketname", "my-objectname", t) + if err != nil { + log.Fatalln(err) + } +} diff --git a/examples/s3/removeallbucketnotification.go b/examples/s3/removeallbucketnotification.go index 1186afad8..c6caa67d9 100644 --- a/examples/s3/removeallbucketnotification.go +++ b/examples/s3/removeallbucketnotification.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,11 @@ package main import ( + "context" "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -34,14 +36,17 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } // s3Client.TraceOn(os.Stderr) - err = s3Client.RemoveAllBucketNotification("my-bucketname") + err = s3Client.RemoveAllBucketNotification(context.Background(), "my-bucketname") if err != nil { log.Fatalln(err) } diff --git a/examples/s3/removebucket.go b/examples/s3/removebucket.go index 7a7737ee0..581776ca5 100644 --- a/examples/s3/removebucket.go +++ b/examples/s3/removebucket.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,11 @@ package main import ( + "context" "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -34,13 +36,16 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } // This operation will only work if your bucket is empty. 
- err = s3Client.RemoveBucket("my-bucketname") + err = s3Client.RemoveBucket(context.Background(), "my-bucketname") if err != nil { log.Fatalln(err) } diff --git a/examples/s3/removebucketencryption.go b/examples/s3/removebucketencryption.go new file mode 100644 index 000000000..b06a93762 --- /dev/null +++ b/examples/s3/removebucketencryption.go @@ -0,0 +1,54 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "log" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are + // dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. 
+ s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + + // s3Client.TraceOn(os.Stderr) + + // Get default encryption configuration set on a S3 bucket + err = s3Client.RemoveBucketEncryption(context.Background(), "my-bucketname") + if err != nil { + log.Fatalln(err) + } +} diff --git a/examples/s3/removebucketreplication.go b/examples/s3/removebucketreplication.go new file mode 100644 index 000000000..32d4905d2 --- /dev/null +++ b/examples/s3/removebucketreplication.go @@ -0,0 +1,55 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "log" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are + // dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. 
+ s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + + // s3Client.TraceOn(os.Stderr) + + // Remove replication configuration on a bucket + err = s3Client.RemoveBucketReplication(context.Background(), "my-bucketname") + + if err != nil { + log.Fatalln(err) + } +} diff --git a/examples/s3/removebuckettagging.go b/examples/s3/removebuckettagging.go new file mode 100644 index 000000000..99a8e5db5 --- /dev/null +++ b/examples/s3/removebuckettagging.go @@ -0,0 +1,50 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "log" + + minio "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and + // my-objectname are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. 
+ s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + err = s3Client.RemoveBucketTagging(context.Background(), "my-bucketname") + if err != nil { + log.Fatalln(err) + } +} diff --git a/examples/s3/removeincompleteupload.go b/examples/s3/removeincompleteupload.go index 31cc8790b..cf16e0e4c 100644 --- a/examples/s3/removeincompleteupload.go +++ b/examples/s3/removeincompleteupload.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,11 @@ package main import ( + "context" "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -34,12 +36,15 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } - err = s3Client.RemoveIncompleteUpload("my-bucketname", "my-objectname") + err = s3Client.RemoveIncompleteUpload(context.Background(), "my-bucketname", "my-objectname") if err != nil { log.Fatalln(err) } diff --git a/examples/s3/removeobject.go b/examples/s3/removeobject.go index 7e5848576..777835d7d 100644 --- a/examples/s3/removeobject.go +++ b/examples/s3/removeobject.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,11 @@ package main import ( + "context" "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -34,13 +36,22 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } - err = s3Client.RemoveObject("my-bucketname", "my-objectname") + + opts := minio.RemoveObjectOptions{ + GovernanceBypass: true, + } + + err = s3Client.RemoveObject(context.Background(), "my-bucketname", "my-objectname", opts) if err != nil { log.Fatalln(err) } + log.Println("Success") } diff --git a/examples/s3/removeobjects.go b/examples/s3/removeobjects.go index a58113483..2250bbf31 100644 --- a/examples/s3/removeobjects.go +++ b/examples/s3/removeobjects.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,11 @@ package main import ( + "context" "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -34,7 +36,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } @@ -51,7 +56,7 @@ func main() { defer close(doneCh) // List all objects from a bucket-name with a matching prefix. 
- for object := range s3Client.ListObjects("my-bucketname", "my-prefixname", true, doneCh) { + for object := range s3Client.ListObjects(context.Background(), "my-bucketname", "my-prefixname", true, doneCh) { if object.Err != nil { log.Fatalln(object.Err) } @@ -60,7 +65,7 @@ func main() { }() // Call RemoveObjects API - errorCh := s3Client.RemoveObjects("my-bucketname", objectsCh) + errorCh := s3Client.RemoveObjects(context.Background(), "my-bucketname", objectsCh) // Print errors received from RemoveObjects API for e := range errorCh { diff --git a/examples/s3/removeobjecttagging.go b/examples/s3/removeobjecttagging.go new file mode 100644 index 000000000..b359befd6 --- /dev/null +++ b/examples/s3/removeobjecttagging.go @@ -0,0 +1,50 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "log" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and + // my-objectname are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. 
API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + err = s3Client.RemoveObjectTagging(context.Background(), "my-bucketname", "my-objectname") + if err != nil { + log.Fatalln(err) + } +} diff --git a/examples/s3/selectobject.go b/examples/s3/selectobject.go index e23ccf8e1..ea06aff3c 100644 --- a/examples/s3/selectobject.go +++ b/examples/s3/selectobject.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2018 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,7 +25,8 @@ import ( "log" "os" - minio "github.com/minio/minio-go" + minio "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -37,7 +38,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/setbucketencryption.go b/examples/s3/setbucketencryption.go new file mode 100644 index 000000000..81e3c05d5 --- /dev/null +++ b/examples/s3/setbucketencryption.go @@ -0,0 +1,55 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "log" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/sse" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are + // dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + + // s3Client.TraceOn(os.Stderr) + + // Set default encryption configuration on a bucket + err = s3Client.SetBucketEncryption(context.Background(), "my-bucketname", sse.NewConfigurationSSES3()) + if err != nil { + log.Fatalln(err) + } +} diff --git a/examples/s3/setbucketlifecycle.go b/examples/s3/setbucketlifecycle.go index 7eaa946f7..f829f8ea4 100644 --- a/examples/s3/setbucketlifecycle.go +++ b/examples/s3/setbucketlifecycle.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. 
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,12 @@ package main import ( + "context" "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/lifecycle" ) func main() { @@ -34,7 +37,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } @@ -42,8 +48,17 @@ func main() { // s3Client.TraceOn(os.Stderr) // Set lifecycle on a bucket - lifecycle := `expire-bucketEnabled365` - err = s3Client.SetBucketLifecycle("my-bucketname", lifecycle) + config := lifecycle.NewConfiguration() + config.Rules = []lifecycle.Rule{ + { + ID: "expire-bucket", + Status: "Enabled", + Expiration: lifecycle.Expiration{ + Days: 365, + }, + }, + } + err = s3Client.SetBucketLifecycle(context.Background(), "my-bucketname", config) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/setbucketnotification.go b/examples/s3/setbucketnotification.go index b5af30f06..519744efb 100644 --- a/examples/s3/setbucketnotification.go +++ b/examples/s3/setbucketnotification.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,12 @@ package main import ( + "context" "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/notification" ) func main() { @@ -34,7 +37,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } @@ -55,30 +61,30 @@ func main() { // with actual values that you receive from the S3 provider // Here you create a new Topic notification - topicArn := minio.NewArn("YOUR-PROVIDER", "YOUR-SERVICE", "YOUR-REGION", "YOUR-ACCOUNT-ID", "YOUR-RESOURCE") - topicConfig := minio.NewNotificationConfig(topicArn) + topicArn := notification.NewArn("YOUR-PROVIDER", "YOUR-SERVICE", "YOUR-REGION", "YOUR-ACCOUNT-ID", "YOUR-RESOURCE") + topicConfig := notification.NewConfig(topicArn) topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll) topicConfig.AddFilterPrefix("photos/") topicConfig.AddFilterSuffix(".jpg") // Create a new Queue notification - queueArn := minio.NewArn("YOUR-PROVIDER", "YOUR-SERVICE", "YOUR-REGION", "YOUR-ACCOUNT-ID", "YOUR-RESOURCE") - queueConfig := minio.NewNotificationConfig(queueArn) + queueArn := notification.NewArn("YOUR-PROVIDER", "YOUR-SERVICE", "YOUR-REGION", "YOUR-ACCOUNT-ID", "YOUR-RESOURCE") + queueConfig := notification.NewConfig(queueArn) queueConfig.AddEvents(minio.ObjectRemovedAll) // Create a new Lambda (CloudFunction) - lambdaArn := minio.NewArn("YOUR-PROVIDER", "YOUR-SERVICE", "YOUR-REGION", 
"YOUR-ACCOUNT-ID", "YOUR-RESOURCE") - lambdaConfig := minio.NewNotificationConfig(lambdaArn) + lambdaArn := notification.NewArn("YOUR-PROVIDER", "YOUR-SERVICE", "YOUR-REGION", "YOUR-ACCOUNT-ID", "YOUR-RESOURCE") + lambdaConfig := notification.NewConfig(lambdaArn) lambdaConfig.AddEvents(minio.ObjectRemovedAll) lambdaConfig.AddFilterSuffix(".swp") // Now, set all previously created notification configs - bucketNotification := minio.BucketNotification{} - bucketNotification.AddTopic(topicConfig) - bucketNotification.AddQueue(queueConfig) - bucketNotification.AddLambda(lambdaConfig) + config := ¬ification.Configuration{} + config.AddTopic(topicConfig) + config.AddQueue(queueConfig) + config.AddLambda(lambdaConfig) - err = s3Client.SetBucketNotification("YOUR-BUCKET", bucketNotification) + err = s3Client.SetBucketNotification(context.Background(), "YOUR-BUCKET", config) if err != nil { log.Fatalln("Error: " + err.Error()) } diff --git a/examples/s3/setbucketpolicy.go b/examples/s3/setbucketpolicy.go index bc42da826..9e47b29be 100644 --- a/examples/s3/setbucketpolicy.go +++ b/examples/s3/setbucketpolicy.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,11 @@ package main import ( + "context" "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -34,7 +36,10 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. 
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } @@ -44,7 +49,7 @@ func main() { // Create policy policy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:GetObject"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::my-bucketname/*"],"Sid": ""}]}` - err = s3Client.SetBucketPolicy("my-bucketname", policy) + err = s3Client.SetBucketPolicy(context.Background(), "my-bucketname", policy) if err != nil { log.Fatalln(err) } diff --git a/examples/s3/setbucketreplication.go b/examples/s3/setbucketreplication.go new file mode 100644 index 000000000..173e619e9 --- /dev/null +++ b/examples/s3/setbucketreplication.go @@ -0,0 +1,65 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "encoding/xml" + "log" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/replication" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are + // dummy values, please replace them with original values. 
+ + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + + // s3Client.TraceOn(os.Stderr) + + replicationStr := `stringEnabled1Disabledarn:aws:s3:::destPrefixTag-Key1Tag-Value1Tag-Key2Tag-Value2` + var replCfg replication.Config + err = xml.Unmarshal([]byte(replicationStr), &replCfg) + if err != nil { + log.Fatalln(err) + } + + // This replication ARN should have been generated for replication endpoint using `mc admin bucket remote` command + replCfg.Role = "arn:minio:replica::dadddae7-f1d7-440f-b5d6-651aa9a8c8a7:dest" + // Set replication config on a bucket + err = s3Client.SetBucketReplication(context.Background(), "my-bucketname", replCfg) + if err != nil { + log.Fatalln(err) + } +} diff --git a/examples/s3/setbuckettagging.go b/examples/s3/setbuckettagging.go new file mode 100644 index 000000000..09f579d97 --- /dev/null +++ b/examples/s3/setbuckettagging.go @@ -0,0 +1,60 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "log" + + minio "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/tags" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and + // my-objectname are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + + t, err := tags.MapToBucketTags(map[string]string{ + "Tag1": "Value1", + "Tag2": "Value2", + }) + if err != nil { + log.Fatalln(err) + } + + err = s3Client.SetBucketTagging(context.Background(), "my-bucketname", t) + if err != nil { + log.Fatalln(err) + } +} diff --git a/examples/s3/setobjectlockconfig.go b/examples/s3/setobjectlockconfig.go new file mode 100644 index 000000000..653c838b7 --- /dev/null +++ b/examples/s3/setobjectlockconfig.go @@ -0,0 +1,59 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "log" + + minio "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are + // dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) + if err != nil { + log.Fatalln(err) + } + + // s3Client.TraceOn(os.Stderr) + + // Set object lock configuration. + mode := minio.Governance + validity := uint(30) + unit := minio.Days + + err = s3Client.SetObjectLockConfig(context.Background(), "my-bucketname", &mode, &validity, &unit) + if err != nil { + log.Fatalln(err) + } + log.Println("Success") +} diff --git a/examples/s3/statobject.go b/examples/s3/statobject.go index 0b27a83b3..667b9e4e6 100644 --- a/examples/s3/statobject.go +++ b/examples/s3/statobject.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,11 @@ package main import ( + "context" "log" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) func main() { @@ -34,11 +36,14 @@ func main() { // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + s3Client, err := minio.New("s3.amazonaws.com", &minio.Options{ + Creds: credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), + Secure: true, + }) if err != nil { log.Fatalln(err) } - stat, err := s3Client.StatObject("my-bucketname", "my-objectname", minio.StatObjectOptions{}) + stat, err := s3Client.StatObject(context.Background(), "my-bucketname", "my-objectname", minio.StatObjectOptions{}) if err != nil { log.Fatalln(err) } diff --git a/functional_tests.go b/functional_tests.go index a5aff8b69..9099efe3b 100644 --- a/functional_tests.go +++ b/functional_tests.go @@ -1,8 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -22,7 +22,6 @@ package main import ( "bytes" "context" - "encoding/json" "errors" "fmt" "io" @@ -35,15 +34,20 @@ import ( "path/filepath" "reflect" "runtime" + "sort" "strconv" "strings" "time" - humanize "github.com/dustin/go-humanize" - minio "github.com/minio/minio-go" + "github.com/dustin/go-humanize" + jsoniter "github.com/json-iterator/go" log "github.com/sirupsen/logrus" - "github.com/minio/minio-go/pkg/encrypt" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/notification" + "github.com/minio/minio-go/v7/pkg/tags" ) const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" @@ -57,7 +61,6 @@ const ( accessKey = "ACCESS_KEY" secretKey = "SECRET_KEY" enableHTTPS = "ENABLE_HTTPS" - enableKMS = "ENABLE_KMS" ) type mintJSONFormatter struct { @@ -75,7 +78,7 @@ func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) { data[k] = v } } - + var json = jsoniter.ConfigCompatibleWithStandardLibrary serialized, err := json.Marshal(data) if err != nil { return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) @@ -83,6 +86,41 @@ func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) { return append(serialized, '\n'), nil } +var readFull = func(r io.Reader, buf []byte) (n int, err error) { + // ReadFull reads exactly len(buf) bytes from r into buf. + // It returns the number of bytes copied and an error if + // fewer bytes were read. The error is EOF only if no bytes + // were read. If an EOF happens after reading some but not + // all the bytes, ReadFull returns ErrUnexpectedEOF. + // On return, n == len(buf) if and only if err == nil. + // If r returns an error having read at least len(buf) bytes, + // the error is dropped. 
+ for n < len(buf) && err == nil { + var nn int + nn, err = r.Read(buf[n:]) + // Some spurious io.Reader's return + // io.ErrUnexpectedEOF when nn == 0 + // this behavior is undocumented + // so we are on purpose not using io.ReadFull + // implementation because this can lead + // to custom handling, to avoid that + // we simply modify the original io.ReadFull + // implementation to avoid this issue. + // io.ErrUnexpectedEOF with nn == 0 really + // means that io.EOF + if err == io.ErrUnexpectedEOF && nn == 0 { + err = io.EOF + } + n += nn + } + if n >= len(buf) { + err = nil + } else if n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + func cleanEmptyEntries(fields log.Fields) log.Fields { cleanFields := log.Fields{} for k, v := range fields { @@ -148,30 +186,64 @@ func cleanupBucket(bucketName string, c *minio.Client) error { // Exit cleanly upon return. defer close(doneCh) // Iterate over all objects in the bucket via listObjectsV2 and delete - for objCh := range c.ListObjectsV2(bucketName, "", true, doneCh) { + for objCh := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Recursive: true}) { if objCh.Err != nil { return objCh.Err } if objCh.Key != "" { - err := c.RemoveObject(bucketName, objCh.Key) + err := c.RemoveObject(context.Background(), bucketName, objCh.Key, minio.RemoveObjectOptions{}) + if err != nil { + return err + } + } + } + for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) { + if objPartInfo.Err != nil { + return objPartInfo.Err + } + if objPartInfo.Key != "" { + err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key) + if err != nil { + return err + } + } + } + // objects are already deleted, clear the buckets now + err := c.RemoveBucket(context.Background(), bucketName) + if err != nil { + return err + } + return err +} + +func cleanupVersionedBucket(bucketName string, c *minio.Client) error { + doneCh := make(chan struct{}) 
+ defer close(doneCh) + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) { + if obj.Err != nil { + return obj.Err + } + if obj.Key != "" { + err := c.RemoveObject(context.Background(), bucketName, obj.Key, + minio.RemoveObjectOptions{VersionID: obj.VersionID, GovernanceBypass: true}) if err != nil { return err } } } - for objPartInfo := range c.ListIncompleteUploads(bucketName, "", true, doneCh) { + for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) { if objPartInfo.Err != nil { return objPartInfo.Err } if objPartInfo.Key != "" { - err := c.RemoveIncompleteUpload(bucketName, objPartInfo.Key) + err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key) if err != nil { return err } } } // objects are already deleted, clear the buckets now - err := c.RemoveBucket(bucketName) + err := c.RemoveBucket(context.Background(), bucketName) if err != nil { return err } @@ -184,9 +256,9 @@ func isErrNotImplemented(err error) bool { func init() { // If server endpoint is not set, all tests default to - // using https://play.minio.io:9000 + // using https://play.min.io if os.Getenv(serverEndpoint) == "" { - os.Setenv(serverEndpoint, "play.minio.io:9000") + os.Setenv(serverEndpoint, "play.min.io") os.Setenv(accessKey, "Q3AM3UQ867SPQQA43P2F") os.Setenv(secretKey, "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG") os.Setenv(enableHTTPS, "1") @@ -264,7 +336,7 @@ var dataFileMap = map[string]int{ "datafile-5-MB": 5 * humanize.MiByte, "datafile-6-MB": 6 * humanize.MiByte, "datafile-11-MB": 11 * humanize.MiByte, - "datafile-65-MB": 65 * humanize.MiByte, + "datafile-129-MB": 129 * humanize.MiByte, } func isFullMode() bool { @@ -272,7 +344,11 @@ func isFullMode() bool { } func getFuncName() string { - pc, _, _, _ := runtime.Caller(1) + return getFuncNameLoc(2) +} + +func getFuncNameLoc(caller int) string { + pc, _, _, _ := runtime.Caller(caller) return 
strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.") } @@ -300,14 +376,13 @@ func testMakeBucketError() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) return } @@ -315,18 +390,18 @@ func testMakeBucketError() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket in 'eu-central-1'. - if err = c.MakeBucket(bucketName, region); err != nil { + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { logError(testName, function, args, startTime, "", "MakeBucket Failed", err) return } - if err = c.MakeBucket(bucketName, region); err == nil { + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil { logError(testName, function, args, startTime, "", "Bucket already exists", err) return } @@ -356,17 +431,16 @@ func testMetadataSizeLimit() { rand.Seed(startTime.Unix()) // Instantiate new minio client object. 
- c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) return } - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -374,7 +448,7 @@ func testMetadataSizeLimit() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "Make bucket failed", err) return @@ -388,7 +462,7 @@ func testMetadataSizeLimit() { metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test"))) args["metadata"] = fmt.Sprint(metadata) - _, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) if err == nil { logError(testName, function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil) return @@ -398,7 +472,7 @@ func testMetadataSizeLimit() { metadata = make(map[string]string) metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test"))) 
args["metadata"] = fmt.Sprint(metadata) - _, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) if err == nil { logError(testName, function, args, startTime, "", "Created object with headers exceeding header size limits", nil) return @@ -436,14 +510,13 @@ func testMakeBucketRegions() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) return } @@ -451,14 +524,14 @@ func testMakeBucketRegions() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket in 'eu-central-1'. - if err = c.MakeBucket(bucketName, region); err != nil { + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } @@ -474,7 +547,7 @@ func testMakeBucketRegions() { // virtual host style. 
region = "us-west-2" args["region"] = region - if err = c.MakeBucket(bucketName+".withperiod", region); err != nil { + if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: region}); err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } @@ -503,14 +576,13 @@ func testPutObjectReadAt() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -518,21 +590,21 @@ func testPutObjectReadAt() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "Make bucket failed", err) return } - bufSize := dataFileMap["datafile-65-MB"] - var reader = getDataReader("datafile-65-MB") + bufSize := dataFileMap["datafile-129-MB"] + var reader = getDataReader("datafile-129-MB") defer reader.Close() // Save the data @@ -543,19 +615,14 @@ func testPutObjectReadAt() { objectContentType := "binary/octet-stream" args["objectContentType"] = objectContentType - n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType}) + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(bufSize)+" got "+string(n), err) - return - } - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "Get Object failed", err) return @@ -592,35 +659,28 @@ func testPutObjectReadAt() { successLogger(testName, function, args, startTime).Info() } -// Test PutObject using a large data to trigger multipart readat -func testPutObjectWithMetadata() { +func testListObjectVersions() { // initialize logging params startTime := time.Now() testName := getFuncName() - function := "PutObject(bucketName, objectName, reader,size, opts)" + function := "ListObjectVersions(bucketName, prefix, recursive)" args := map[string]interface{}{ "bucketName": "", - 
"objectName": "", - "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", - } - - if !isFullMode() { - ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() - return + "prefix": "", + "recursive": "", } // Seed random based on current time. rand.Seed(time.Now().Unix()) // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -628,78 +688,210 @@ func testPutObjectWithMetadata() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) if err != nil { logError(testName, function, args, startTime, "", "Make bucket failed", err) return } - bufSize := dataFileMap["datafile-65-MB"] - var reader = getDataReader("datafile-65-MB") - defer reader.Close() + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - // Object custom metadata - customContentType := "custom/contenttype" + bufSize := dataFileMap["datafile-10-kB"] + var reader = getDataReader("datafile-10-kB") - args["metadata"] = map[string][]string{ - "Content-Type": {customContentType}, - "X-Amz-Meta-CustomKey": {"extra spaces in value"}, + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return } + reader.Close() - n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ - ContentType: customContentType}) + bufSize = dataFileMap["datafile-1-b"] + reader = getDataReader("datafile-1-b") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } + reader.Close() - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(bufSize)+" got "+string(n), err) + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, 
args, startTime, "", "Unexpected object deletion", err) return } - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + var deleteMarkers, versions int + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + if info.Key != objectName { + logError(testName, function, args, startTime, "", "Unexpected object name in listing objects", nil) + return + } + if info.VersionID == "" { + logError(testName, function, args, startTime, "", "Unexpected version id in listing objects", nil) + return + } + if info.IsDeleteMarker { + deleteMarkers++ + if !info.IsLatest { + logError(testName, function, args, startTime, "", "Unexpected IsLatest field in listing objects", nil) + return + } + } else { + versions++ + } + } + + if deleteMarkers != 1 { + logError(testName, function, args, startTime, "", "Unexpected number of DeleteMarker elements in listing objects", nil) + return + } + + if versions != 2 { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testStatObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "StatObject" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - st, err := r.Stat() + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) + logError(testName, function, args, startTime, "", "Make bucket failed", err) return } - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) return } - if st.ContentType != customContentType && st.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + bufSize := dataFileMap["datafile-10-kB"] + var reader = getDataReader("datafile-10-kB") + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), 
minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) return } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", "Object Close failed", err) + reader.Close() + + bufSize = dataFileMap["datafile-1-b"] + reader = getDataReader("datafile-1-b") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) return } - if err := r.Close(); err == nil { - logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err) + reader.Close() + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + if len(results) != 2 { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) return } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { + for i := 0; i < len(results); i++ { + opts := minio.StatObjectOptions{VersionID: results[i].VersionID} + statInfo, err := c.StatObject(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "error during HEAD object", err) + return + } + if statInfo.VersionID == "" || statInfo.VersionID != results[i].VersionID { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected version id", err) + return + } + if statInfo.ETag != results[i].ETag { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) + 
return + } + if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) + return + } + if statInfo.Size != results[i].Size { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) + return + } + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { logError(testName, function, args, startTime, "", "Cleanup failed", err) return } @@ -707,31 +899,24 @@ func testPutObjectWithMetadata() { successLogger(testName, function, args, startTime).Info() } -func testPutObjectWithContentLanguage() { +func testGetObjectWithVersioning() { // initialize logging params - objectName := "test-object" startTime := time.Now() testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": objectName, - "size": -1, - "opts": "", - } + function := "GetObject()" + args := map[string]interface{}{} // Seed random based on current time. rand.Seed(time.Now().Unix()) // Instantiate new minio client object. - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -739,45 +924,113 @@ func testPutObjectWithContentLanguage() { // c.TraceOn(os.Stderr) // Set user agent. 
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName + // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) + logError(testName, function, args, startTime, "", "Make bucket failed", err) return } - data := bytes.Repeat([]byte("a"), int(0)) - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{ - ContentLanguage: "en", - }) + err = c.EnableVersioning(context.Background(), bucketName) if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) + logError(testName, function, args, startTime, "", "Enable versioning failed", err) return } - if n != 0 { - logError(testName, function, args, startTime, "", "Expected upload object '0' doesn't match with PutObject return value", err) - return + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Save the contents of datafiles to check with GetObject() reader output later + var buffers [][]byte + var testFiles = []string{"datafile-1-b", "datafile-10-kB"} + + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := ioutil.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + buffers = 
append(buffers, buf) } - objInfo, err := c.StatObject(bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) } - if objInfo.Metadata.Get("Content-Language") != "en" { - logError(testName, function, args, startTime, "", "Expected content-language 'en' doesn't match with StatObject return value", err) + if len(results) != 2 { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) return } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { + sort.SliceStable(results, func(i, j int) bool { + return results[i].Size < results[j].Size + }) + + sort.SliceStable(buffers, func(i, j int) bool { + return len(buffers[i]) < len(buffers[j]) + }) + + for i := 0; i < len(results); i++ { + opts := minio.GetObjectOptions{VersionID: results[i].VersionID} + reader, err := c.GetObject(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "error during GET object", err) + return + } + statInfo, err := reader.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err) + return + } + if statInfo.ETag != results[i].ETag { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) + return + } + if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", 
err) + return + } + if statInfo.Size != results[i].Size { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) + return + } + + tmpBuffer := bytes.NewBuffer([]byte{}) + _, err = io.Copy(tmpBuffer, reader) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected io.Copy()", err) + return + } + + if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) { + logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err) + return + } + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { logError(testName, function, args, startTime, "", "Cleanup failed", err) return } @@ -785,32 +1038,24 @@ func testPutObjectWithContentLanguage() { successLogger(testName, function, args, startTime).Info() } -// Test put object with streaming signature. -func testPutObjectStreaming() { +func testCopyObjectWithVersioning() { // initialize logging params - objectName := "test-object" startTime := time.Now() testName := getFuncName() - function := "PutObject(bucketName, objectName, reader,size,opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": objectName, - "size": -1, - "opts": "", - } + function := "CopyObject()" + args := map[string]interface{}{} // Seed random based on current time. rand.Seed(time.Now().Unix()) // Instantiate new minio client object. 
- c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -818,33 +1063,846 @@ func testPutObjectStreaming() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName + // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) + logError(testName, function, args, startTime, "", "Make bucket failed", err) return } - // Upload an object. 
- sizes := []int64{0, 64*1024 - 1, 64 * 1024} + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } - for _, size := range sizes { - data := bytes.Repeat([]byte("a"), int(size)) - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(size), minio.PutObjectOptions{}) + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + var testFiles = []string{"datafile-1-b", "datafile-10-kB"} + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := ioutil.ReadAll(r) if err != nil { - logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) + logError(testName, function, args, startTime, "", "unexpected failure", err) return } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var infos []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + infos = append(infos, info) + } - if n != size { - logError(testName, function, args, startTime, "", "Expected upload object size doesn't match with PutObjectStreaming return value", err) + sort.Slice(infos, func(i, j int) bool { + return infos[i].Size < infos[j].Size + }) + + reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err) + 
return + } + + oldestContent, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) + return + } + + // Copy Source + srcOpts := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: infos[0].VersionID, + } + args["src"] = srcOpts + + dstOpts := minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName + "-copy", + } + args["dst"] = dstOpts + + // Perform the Copy + if _, err = c.CopyObject(context.Background(), dstOpts, srcOpts); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Destination object + readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer readerCopy.Close() + + newestContent, err := ioutil.ReadAll(readerCopy) + if err != nil { + logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) + return + } + + if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) { + logError(testName, function, args, startTime, "", "Unexpected destination object content", err) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testComposeObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // var testFiles = []string{"datafile-5-MB", "datafile-10-kB"} + var testFiles = []string{"datafile-5-MB", "datafile-10-kB"} + var testFilesBytes [][]byte + + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := ioutil.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + testFilesBytes = append(testFilesBytes, buf) + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var 
results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + sort.SliceStable(results, func(i, j int) bool { + return results[i].Size > results[j].Size + }) + + // Source objects to concatenate. We also specify decryption + // key for each + src1 := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: results[0].VersionID, + } + + src2 := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: results[1].VersionID, + } + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName + "-copy", + } + + _, err = c.ComposeObject(context.Background(), dst, src1, src2) + if err != nil { + logError(testName, function, args, startTime, "", "ComposeObject failed", err) + return + } + + // Destination object + readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of the copy object failed", err) + return + } + defer readerCopy.Close() + + copyContentBytes, err := ioutil.ReadAll(readerCopy) + if err != nil { + logError(testName, function, args, startTime, "", "Reading from the copy object reader failed", err) + return + } + + var expectedContent []byte + for _, fileBytes := range testFilesBytes { + expectedContent = append(expectedContent, fileBytes...) 
+ } + + if len(copyContentBytes) == 0 || !bytes.Equal(copyContentBytes, expectedContent) { + logError(testName, function, args, startTime, "", "Unexpected destination object content", err) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testRemoveObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "DeleteObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var version minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + version = info + break + } + + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{VersionID: version.VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "DeleteObject failed", err) + return + } + + objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + for range objectsInfo { + logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any one ", err) + return + } + + err = c.RemoveBucket(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testRemoveObjectsWithVersioning() { + // initialize logging params + startTime := 
time.Now() + testName := getFuncName() + function := "DeleteObjects()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + objectsVersions := make(chan minio.ObjectInfo) + go func() { + objectsVersionsInfo := c.ListObjects(context.Background(), bucketName, + minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + for info := range objectsVersionsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", 
"Unexpected error during listing objects", err) + return + } + objectsVersions <- info + } + close(objectsVersions) + }() + + removeErrors := c.RemoveObjects(context.Background(), bucketName, objectsVersions, minio.RemoveObjectsOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "DeleteObjects call failed", err) + return + } + + for e := range removeErrors { + if e.Err != nil { + logError(testName, function, args, startTime, "", "Single delete operation failed", err) + return + } + } + + objectsVersionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + for range objectsVersionsInfo { + logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any one ", err) + return + } + + err = c.RemoveBucket(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testObjectTaggingWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "{Get,Set,Remove}ObjectTagging()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + for _, file := range []string{"datafile-1-b", "datafile-10-kB"} { + _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader(file), int64(dataFileMap[file]), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + versionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var versions []minio.ObjectInfo + for info := range versionsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + versions = append(versions, info) + } + + sort.SliceStable(versions, func(i, j int) bool { + return versions[i].Size < versions[j].Size + }) + + tagsV1 := map[string]string{"key1": "val1"} + t1, err := tags.MapToObjectTags(tagsV1) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err) + return + } + + err = c.PutObjectTagging(context.Background(), bucketName, objectName, t1, minio.PutObjectTaggingOptions{VersionID: versions[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err) + return + } + + tagsV2 := map[string]string{"key2": "val2"} + t2, 
err := tags.MapToObjectTags(tagsV2) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err) + return + } + + err = c.PutObjectTagging(context.Background(), bucketName, objectName, t2, minio.PutObjectTaggingOptions{VersionID: versions[1].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (2) failed", err) + return + } + + tagsEqual := func(tags1, tags2 map[string]string) bool { + for k1, v1 := range tags1 { + v2, found := tags2[k1] + if found { + if v1 != v2 { + return false + } + } + } + return true + } + + gotTagsV1, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectTagging failed", err) + return + } + + if !tagsEqual(t1.ToMap(), gotTagsV1.ToMap()) { + logError(testName, function, args, startTime, "", "Unexpected tags content (1)", err) + return + } + + gotTagsV2, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectTaggingContext failed", err) + return + } + + if !tagsEqual(t2.ToMap(), gotTagsV2.ToMap()) { + logError(testName, function, args, startTime, "", "Unexpected tags content (2)", err) + return + } + + err = c.RemoveObjectTagging(context.Background(), bucketName, objectName, minio.RemoveObjectTaggingOptions{VersionID: versions[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (2) failed", err) + return + } + + emptyTags, err := c.GetObjectTagging(context.Background(), bucketName, objectName, + minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectTagging failed", err) + return + } + + if len(emptyTags.ToMap()) != 0 { + 
logError(testName, function, args, startTime, "", "Unexpected tags content (2)", err) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test PutObject using a large data to trigger multipart readat +func testPutObjectWithMetadata() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", + } + + if !isFullMode() { + ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + bufSize := dataFileMap["datafile-129-MB"] + var reader = getDataReader("datafile-129-MB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Object custom metadata + customContentType := "custom/contenttype" + + args["metadata"] = map[string][]string{ + "Content-Type": {customContentType}, + "X-Amz-Meta-CustomKey": {"extra spaces in value"}, + } + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ + ContentType: customContentType}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) + return + } + if st.ContentType != customContentType && st.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object already closed, 
should respond with error", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testPutObjectWithContentLanguage() { + // initialize logging params + objectName := "test-object" + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": objectName, + "size": -1, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + // Make a new bucket. 
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + data := bytes.Repeat([]byte("a"), int(0)) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{ + ContentLanguage: "en", + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + if objInfo.Metadata.Get("Content-Language") != "en" { + logError(testName, function, args, startTime, "", "Expected content-language 'en' doesn't match with StatObject return value", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test put object with streaming signature. +func testPutObjectStreaming() { + // initialize logging params + objectName := "test-object" + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size,opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": objectName, + "size": -1, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Upload an object. + sizes := []int64{0, 64*1024 - 1, 64 * 1024} + + for _, size := range sizes { + data := bytes.Repeat([]byte("a"), int(size)) + ui, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(data), int64(size), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) + return + } + + if ui.Size != size { + logError(testName, function, args, startTime, "", "PutObjectStreaming result has unexpected size", nil) + return + } + + objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if objInfo.Size != size { + logError(testName, function, args, startTime, "", "Unexpected size", err) return } + } // Delete all objects and buckets @@ -868,14 +1926,13 @@ func testGetObjectSeekEnd() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. 
- c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -883,14 +1940,14 @@ func testGetObjectSeekEnd() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -911,19 +1968,14 @@ func testGetObjectSeekEnd() { return } - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(n), err) - return - } - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + r, err := c.GetObject(context.Background(), 
bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -950,9 +2002,9 @@ func testGetObjectSeekEnd() { return } buf2 := make([]byte, 100) - m, err := io.ReadFull(r, buf2) + m, err := readFull(r, buf2) if err != nil { - logError(testName, function, args, startTime, "", "Error reading through io.ReadFull", err) + logError(testName, function, args, startTime, "", "Error reading through readFull", err) return } if m != len(buf2) { @@ -1000,14 +2052,13 @@ func testGetObjectClosedTwice() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -1015,14 +2066,14 @@ func testGetObjectClosedTwice() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -1037,19 +2088,14 @@ func testGetObjectClosedTwice() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "PutObject response doesn't match sent bytes, expected "+string(int64(bufSize))+" got "+string(n), err) - return - } - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -1082,12 +2128,12 @@ func testGetObjectClosedTwice() { successLogger(testName, function, args, startTime).Info() } -// Test RemoveObjectsWithContext request context cancels after timeout -func testRemoveObjectsWithContext() { +// Test RemoveObjects request where context cancels after timeout +func testRemoveObjectsContext() { // Initialize logging params. startTime := time.Now() testName := getFuncName() - function := "RemoveObjectsWithContext(ctx, bucketName, objectsCh)" + function := "RemoveObjects(ctx, bucketName, objectsCh)" args := map[string]interface{}{ "bucketName": "", } @@ -1096,19 +2142,18 @@ func testRemoveObjectsWithContext() { rand.Seed(time.Now().Unix()) // Instantiate new minio client. 
- c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Enable tracing, write to stdout. // c.TraceOn(os.Stderr) @@ -1117,7 +2162,7 @@ func testRemoveObjectsWithContext() { args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) } @@ -1127,17 +2172,21 @@ func testRemoveObjectsWithContext() { // Multi remove of 20 objects. nrObjects := 20 - objectsCh := make(chan string) + objectsCh := make(chan minio.ObjectInfo) go func() { defer close(objectsCh) for i := 0; i < nrObjects; i++ { objectName := "sample" + strconv.Itoa(i) + ".txt" - _, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, + minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) continue } - objectsCh <- objectName + objectsCh <- minio.ObjectInfo{ + Key: info.Key, + VersionID: info.VersionID, + } } }() // Set context to cancel in 1 nanosecond. 
@@ -1145,13 +2194,13 @@ func testRemoveObjectsWithContext() { args["ctx"] = ctx defer cancel() - // Call RemoveObjectsWithContext API with short timeout. - errorCh := c.RemoveObjectsWithContext(ctx, bucketName, objectsCh) + // Call RemoveObjects API with short timeout. + errorCh := c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{}) // Check for error. select { case r := <-errorCh: if r.Err == nil { - logError(testName, function, args, startTime, "", "RemoveObjectsWithContext should fail on short timeout", err) + logError(testName, function, args, startTime, "", "RemoveObjects should fail on short timeout", err) return } } @@ -1159,8 +2208,8 @@ func testRemoveObjectsWithContext() { ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) args["ctx"] = ctx defer cancel() - // Perform RemoveObjectsWithContext with the longer timeout. Expect the removals to succeed. - errorCh = c.RemoveObjectsWithContext(ctx, bucketName, objectsCh) + // Perform RemoveObjects with the longer timeout. Expect the removals to succeed. + errorCh = c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{}) select { case r, more := <-errorCh: if more || r.Err != nil { @@ -1191,20 +2240,18 @@ func testRemoveMultipleObjects() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } // Set user agent. 
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Enable tracing, write to stdout. // c.TraceOn(os.Stderr) @@ -1214,7 +2261,7 @@ func testRemoveMultipleObjects() { args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -1225,24 +2272,28 @@ func testRemoveMultipleObjects() { // Multi remove of 200 objects nrObjects := 200 - objectsCh := make(chan string) + objectsCh := make(chan minio.ObjectInfo) go func() { defer close(objectsCh) // Upload objects and send them to objectsCh for i := 0; i < nrObjects; i++ { objectName := "sample" + strconv.Itoa(i) + ".txt" - _, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, + minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) continue } - objectsCh <- objectName + objectsCh <- minio.ObjectInfo{ + Key: info.Key, + VersionID: info.VersionID, + } } }() // Call RemoveObjects API - errorCh := c.RemoveObjects(bucketName, objectsCh) + errorCh := c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) // Check if errorCh doesn't receive any error select { @@ -1279,14 +2330,13 @@ func testFPutObjectMultipart() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. 
- c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -1294,21 +2344,21 @@ func testFPutObjectMultipart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload. - var fileName = getMintDataDirFilePath("datafile-65-MB") + var fileName = getMintDataDirFilePath("datafile-129-MB") if fileName == "" { // Make a temp file with minPartSize bytes of data. file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") @@ -1317,7 +2367,7 @@ func testFPutObjectMultipart() { return } // Upload 2 parts to utilize all 3 'workers' in multipart and still have a part to upload. 
- if _, err = io.Copy(file, getDataReader("datafile-65-MB")); err != nil { + if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { logError(testName, function, args, startTime, "", "Copy failed", err) return } @@ -1328,7 +2378,7 @@ func testFPutObjectMultipart() { fileName = file.Name() args["fileName"] = fileName } - totalSize := dataFileMap["datafile-65-MB"] + totalSize := dataFileMap["datafile-129-MB"] // Set base object name objectName := bucketName + "FPutObject" + "-standard" args["objectName"] = objectName @@ -1337,17 +2387,13 @@ func testFPutObjectMultipart() { args["objectContentType"] = objectContentType // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) - n, err := c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType}) + _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType}) if err != nil { logError(testName, function, args, startTime, "", "FPutObject failed", err) return } - if n != int64(totalSize) { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -1393,14 +2439,13 @@ func testFPutObject() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. 
- c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -1408,7 +2453,7 @@ func testFPutObject() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -1417,8 +2462,8 @@ func testFPutObject() { // Make a new bucket. args["bucketName"] = bucketName args["location"] = location - function = "MakeBucket()bucketName, location" - err = c.MakeBucket(bucketName, location) + function = "MakeBucket(bucketName, location)" + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -1426,7 +2471,7 @@ func testFPutObject() { // Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part. // Use different data in part for multipart tests to check parts are uploaded in correct order. - var fName = getMintDataDirFilePath("datafile-65-MB") + var fName = getMintDataDirFilePath("datafile-129-MB") if fName == "" { // Make a temp file with minPartSize bytes of data. file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") @@ -1436,7 +2481,7 @@ func testFPutObject() { } // Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload. 
- if _, err = io.Copy(file, getDataReader("datafile-65-MB")); err != nil { + if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { logError(testName, function, args, startTime, "", "File copy failed", err) return } @@ -1448,7 +2493,6 @@ func testFPutObject() { defer os.Remove(file.Name()) fName = file.Name() } - totalSize := dataFileMap["datafile-65-MB"] // Set base object name function = "FPutObject(bucketName, objectName, fileName, opts)" @@ -1458,28 +2502,25 @@ func testFPutObject() { args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"} // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) - n, err := c.FPutObject(bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) - + ui, err := c.FPutObject(context.Background(), bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "FPutObject failed", err) return } - if n != int64(totalSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err) + + if ui.Size != int64(dataFileMap["datafile-129-MB"]) { + logError(testName, function, args, startTime, "", "FPutObject returned an unexpected upload size", err) return } // Perform FPutObject with no contentType provided (Expecting application/octet-stream) args["objectName"] = objectName + "-Octet" - n, err = c.FPutObject(bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{}) + _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "File close failed", err) return } - if n != int64(totalSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", 
got "+string(n), err) - return - } + srcFile, err := os.Open(fName) if err != nil { logError(testName, function, args, startTime, "", "File open failed", err) @@ -1502,20 +2543,16 @@ func testFPutObject() { // Perform FPutObject with no contentType provided (Expecting application/x-gtar) args["objectName"] = objectName + "-GTar" args["opts"] = minio.PutObjectOptions{} - n, err = c.FPutObject(bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{}) + _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "FPutObject failed", err) return } - if n != int64(totalSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err) - return - } // Check headers function = "StatObject(bucketName, objectName, opts)" args["objectName"] = objectName + "-standard" - rStandard, err := c.StatObject(bucketName, objectName+"-standard", minio.StatObjectOptions{}) + rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "StatObject failed", err) return @@ -1527,7 +2564,7 @@ func testFPutObject() { function = "StatObject(bucketName, objectName, opts)" args["objectName"] = objectName + "-Octet" - rOctet, err := c.StatObject(bucketName, objectName+"-Octet", minio.StatObjectOptions{}) + rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "StatObject failed", err) return @@ -1539,7 +2576,7 @@ func testFPutObject() { function = "StatObject(bucketName, objectName, opts)" args["objectName"] = objectName + "-GTar" - rGTar, err := c.StatObject(bucketName, objectName+"-GTar", minio.StatObjectOptions{}) + rGTar, err := 
c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "StatObject failed", err) return @@ -1555,16 +2592,12 @@ func testFPutObject() { return } - if err = os.Remove(fName + ".gtar"); err != nil { - logError(testName, function, args, startTime, "", "File remove failed", err) - return - } - + os.Remove(fName + ".gtar") successLogger(testName, function, args, startTime).Info() } -// Tests FPutObjectWithContext request context cancels after timeout -func testFPutObjectWithContext() { +// Tests FPutObject request when context cancels after timeout +func testFPutObjectContext() { // initialize logging params startTime := time.Now() testName := getFuncName() @@ -1579,14 +2612,13 @@ func testFPutObjectWithContext() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -1594,14 +2626,14 @@ func testFPutObjectWithContext() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -1612,7 +2644,7 @@ func testFPutObjectWithContext() { var fName = getMintDataDirFilePath("datafile-1-MB") if fName == "" { // Make a temp file with 1 MiB bytes of data. - file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest") + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest") if err != nil { logError(testName, function, args, startTime, "", "TempFile creation failed", err) return @@ -1631,35 +2663,30 @@ func testFPutObjectWithContext() { defer os.Remove(file.Name()) fName = file.Name() } - totalSize := dataFileMap["datafile-1-MB"] // Set base object name - objectName := bucketName + "FPutObjectWithContext" + objectName := bucketName + "FPutObjectContext" args["objectName"] = objectName ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) args["ctx"] = ctx defer cancel() - // Perform standard FPutObjectWithContext with contentType provided (Expecting application/octet-stream) - _, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + // Perform FPutObject with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err == nil { - logError(testName, function, args, startTime, "", "FPutObjectWithContext should fail on short timeout", err) + logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err) return } ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) defer cancel() - // Perform FPutObjectWithContext with a long timeout. 
Expect the put object to succeed - n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) + // Perform FPutObject with a long timeout. Expect the put object to succeed + _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) if err != nil { - logError(testName, function, args, startTime, "", "FPutObjectWithContext shouldn't fail on long timeout", err) - return - } - if n != int64(totalSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err) + logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on long timeout", err) return } - _, err = c.StatObject(bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) + _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "StatObject failed", err) return @@ -1675,12 +2702,12 @@ func testFPutObjectWithContext() { } -// Tests FPutObjectWithContext request context cancels after timeout -func testFPutObjectWithContextV2() { +// Tests FPutObject request when context cancels after timeout +func testFPutObjectContextV2() { // initialize logging params startTime := time.Now() testName := getFuncName() - function := "FPutObjectWithContext(ctx, bucketName, objectName, fileName, opts)" + function := "FPutObjectContext(ctx, bucketName, objectName, fileName, opts)" args := map[string]interface{}{ "bucketName": "", "objectName": "", @@ -1690,14 +2717,13 @@ func testFPutObjectWithContextV2() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. 
- c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -1705,14 +2731,14 @@ func testFPutObjectWithContextV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -1723,7 +2749,7 @@ func testFPutObjectWithContextV2() { var fName = getMintDataDirFilePath("datafile-1-MB") if fName == "" { // Make a temp file with 1 MiB bytes of data. 
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest") + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest") if err != nil { logError(testName, function, args, startTime, "", "Temp file creation failed", err) return @@ -1743,36 +2769,31 @@ func testFPutObjectWithContextV2() { defer os.Remove(file.Name()) fName = file.Name() } - totalSize := dataFileMap["datafile-1-MB"] // Set base object name - objectName := bucketName + "FPutObjectWithContext" + objectName := bucketName + "FPutObjectContext" args["objectName"] = objectName ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) args["ctx"] = ctx defer cancel() - // Perform standard FPutObjectWithContext with contentType provided (Expecting application/octet-stream) - _, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + // Perform FPutObject with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err == nil { - logError(testName, function, args, startTime, "", "FPutObjectWithContext should fail on short timeout", err) + logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err) return } ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) defer cancel() - // Perform FPutObjectWithContext with a long timeout. Expect the put object to succeed - n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) + // Perform FPutObject with a long timeout. 
Expect the put object to succeed + _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) if err != nil { - logError(testName, function, args, startTime, "", "FPutObjectWithContext shouldn't fail on longer timeout", err) - return - } - if n != int64(totalSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match:wanted"+string(totalSize)+" got "+string(n), err) + logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on longer timeout", err) return } - _, err = c.StatObject(bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) + _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "StatObject failed", err) return @@ -1789,26 +2810,26 @@ func testFPutObjectWithContextV2() { } // Test validates putObject with context to see if request cancellation is honored. -func testPutObjectWithContext() { +func testPutObjectContext() { // initialize logging params startTime := time.Now() testName := getFuncName() - function := "PutObjectWithContext(ctx, bucketName, objectName, fileName, opts)" + function := "PutObject(ctx, bucketName, objectName, fileName, opts)" args := map[string]interface{}{ "ctx": "", "bucketName": "", "objectName": "", "opts": "", } + // Instantiate new minio client object. 
- c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -1816,13 +2837,13 @@ func testPutObjectWithContext() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Make a new bucket. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket call failed", err) return @@ -1838,9 +2859,9 @@ func testPutObjectWithContext() { args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"} defer cancel() - _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err == nil { - logError(testName, function, args, startTime, "", "PutObjectWithContext should fail on short timeout", err) + logError(testName, function, args, startTime, "", "PutObject should fail on short timeout", err) return } @@ -1850,9 +2871,9 @@ func testPutObjectWithContext() { defer cancel() reader = getDataReader("datafile-33-kB") defer reader.Close() - _, err = c.PutObjectWithContext(ctx, 
bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { - logError(testName, function, args, startTime, "", "PutObjectWithContext with long timeout failed", err) + logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err) return } @@ -1878,14 +2899,13 @@ func testGetObjectReadSeekFunctional() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -1893,14 +2913,14 @@ func testGetObjectReadSeekFunctional() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -1929,19 +2949,14 @@ func testGetObjectReadSeekFunctional() { } // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) - return - } - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -2053,14 +3068,13 @@ func testGetObjectReadAtFunctional() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. 
- c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -2068,14 +3082,14 @@ func testGetObjectReadAtFunctional() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -2096,19 +3110,14 @@ func testGetObjectReadAtFunctional() { } // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) - return - } - // read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + r, err := 
c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return @@ -2191,7 +3200,7 @@ func testGetObjectReadAtFunctional() { return } - buf5 := make([]byte, n) + buf5 := make([]byte, len(buf)) // Read the whole object. m, err = r.ReadAt(buf5, 0) if err != nil { @@ -2209,7 +3218,7 @@ func testGetObjectReadAtFunctional() { return } - buf6 := make([]byte, n+1) + buf6 := make([]byte, len(buf)+1) // Read the whole object and beyond. _, err = r.ReadAt(buf6, 0) if err != nil { @@ -2226,6 +3235,127 @@ func testGetObjectReadAtFunctional() { successLogger(testName, function, args, startTime).Info() } +// Reproduces issue https://github.com/minio/minio-go/issues/1137 +func testGetObjectReadAtWhenEOFWasReached() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + // read directly + buf1 := make([]byte, len(buf)) + buf2 := make([]byte, 512) + + m, err := r.Read(buf1) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "Read failed", err) + return + } + } + if m != len(buf1) { + logError(testName, function, args, startTime, "", "Read read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) + return + } + if !bytes.Equal(buf1, buf) { + logError(testName, function, args, startTime, "", "Incorrect count of Read data", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got 
"+string(st.Size), err) + return + } + + m, err = r.ReadAt(buf2, 512) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf2) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) + return + } + if !bytes.Equal(buf2, buf[512:1024]) { + logError(testName, function, args, startTime, "", "Incorrect count of ReadAt data", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + // Test Presigned Post Policy func testPresignedPostPolicy() { // initialize logging params @@ -2240,14 +3370,13 @@ func testPresignedPostPolicy() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -2255,20 +3384,19 @@ func testPresignedPostPolicy() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") // Make a new bucket in 'us-east-1' (source bucket). 
- err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] var reader = getDataReader("datafile-33-kB") defer reader.Close() @@ -2284,17 +3412,12 @@ func testPresignedPostPolicy() { } // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err) - return - } - policy := minio.NewPostPolicy() if err := policy.SetBucket(""); err == nil { @@ -2334,7 +3457,7 @@ func testPresignedPostPolicy() { policy.SetUserMetadata(metadataKey, metadataValue) args["policy"] = policy.String() - presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(policy) + presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy) if err != nil { logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err) return @@ -2386,8 +3509,30 @@ func testPresignedPostPolicy() { } writer.Close() + transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) + if err != nil { + logError(testName, function, args, startTime, "", "DefaultTransport failed", err) + return + } + + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively canceled after 30secs + // with no response. 
+ Timeout: 30 * time.Second, + Transport: transport, + } + + req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes())) + if err != nil { + logError(testName, function, args, startTime, "", "Http request failed", err) + return + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + // make post request with correct form data - res, err := http.Post(presignedPostPolicyURL.String(), writer.FormDataContentType(), bytes.NewReader(formBuf.Bytes())) + res, err := httpClient.Do(req) if err != nil { logError(testName, function, args, startTime, "", "Http request failed", err) return @@ -2440,14 +3585,13 @@ func testCopyObject() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -2455,20 +3599,20 @@ func testCopyObject() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") // Make a new bucket in 'us-east-1' (source bucket). 
- err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } // Make a new bucket in 'us-east-1' (destination bucket). - err = c.MakeBucket(bucketName+"-copy", "us-east-1") + err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -2480,18 +3624,13 @@ func testCopyObject() { // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) - return - } - - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -2504,71 +3643,40 @@ func testCopyObject() { } // Copy Source - src := minio.NewSourceInfo(bucketName, objectName, nil) - args["src"] = src - - // Set copy conditions. - - // All invalid conditions first. 
- err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) - if err == nil { - logError(testName, function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err) - return - } - err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) - if err == nil { - logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err) - return - } - err = src.SetMatchETagCond("") - if err == nil { - logError(testName, function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err) - return - } - err = src.SetMatchETagExceptCond("") - if err == nil { - logError(testName, function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err) - return - } - - err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) - if err != nil { - logError(testName, function, args, startTime, "", "SetModifiedSinceCond failed", err) - return - } - err = src.SetMatchETagCond(objInfo.ETag) - if err != nil { - logError(testName, function, args, startTime, "", "SetMatchETagCond failed", err) - return + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + // Set copy conditions. 
+ MatchETag: objInfo.ETag, + MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), } + args["src"] = src - dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil) - args["dst"] = dst - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return + dst := minio.CopyDestOptions{ + Bucket: bucketName + "-copy", + Object: objectName + "-copy", } // Perform the Copy - err = c.CopyObject(dst, src) - if err != nil { + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { logError(testName, function, args, startTime, "", "CopyObject failed", err) return } // Source object - r, err = c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return } // Destination object - readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{}) + readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return } + // Check the various fields of source object against destination object. 
objInfo, err = r.Stat() if err != nil { @@ -2590,44 +3698,43 @@ func testCopyObject() { readerCopy.Close() // CopyObject again but with wrong conditions - src = minio.NewSourceInfo(bucketName, objectName, nil) - err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) - if err != nil { - logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond failed", err) - return - } - err = src.SetMatchETagExceptCond(objInfo.ETag) - if err != nil { - logError(testName, function, args, startTime, "", "SetMatchETagExceptCond failed", err) - return + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + NoMatchETag: objInfo.ETag, } // Perform the Copy which should fail - err = c.CopyObject(dst, src) + _, err = c.CopyObject(context.Background(), dst, src) if err == nil { logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) return } - // Perform the Copy which should update only metadata. 
- src = minio.NewSourceInfo(bucketName, objectName, nil) - dst, err = minio.NewDestinationInfo(bucketName, objectName, nil, map[string]string{ - "Copy": "should be same", - }) + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + } + + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName, + ReplaceMetadata: true, + UserMetadata: map[string]string{ + "Copy": "should be same", + }, + } args["dst"] = dst args["src"] = src - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - err = c.CopyObject(dst, src) + _, err = c.CopyObject(context.Background(), dst, src) if err != nil { logError(testName, function, args, startTime, "", "CopyObject shouldn't fail", err) return } - oi, err := c.StatObject(bucketName, objectName, minio.StatObjectOptions{}) + oi, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "StatObject failed", err) return @@ -2635,7 +3742,7 @@ func testCopyObject() { stOpts := minio.StatObjectOptions{} stOpts.SetMatchETag(oi.ETag) - objInfo, err = c.StatObject(bucketName, objectName, stOpts) + objInfo, err = c.StatObject(context.Background(), bucketName, objectName, stOpts) if err != nil { logError(testName, function, args, startTime, "", "CopyObject ETag should match and not fail", err) return @@ -2670,14 +3777,13 @@ func testSSECEncryptedGetObjectReadSeekFunctional() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. 
- c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -2685,14 +3791,14 @@ func testSSECEncryptedGetObjectReadSeekFunctional() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -2706,9 +3812,9 @@ func testSSECEncryptedGetObjectReadSeekFunctional() { } }() - // Generate 65MiB of data. - bufSize := dataFileMap["datafile-65-MB"] - var reader = getDataReader("datafile-65-MB") + // Generate 129MiB of data. 
+ bufSize := dataFileMap["datafile-129-MB"] + var reader = getDataReader("datafile-129-MB") defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -2721,7 +3827,7 @@ func testSSECEncryptedGetObjectReadSeekFunctional() { } // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ ContentType: "binary/octet-stream", ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), }) @@ -2730,13 +3836,8 @@ func testSSECEncryptedGetObjectReadSeekFunctional() { return } - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) - return - } - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), }) if err != nil { @@ -2858,14 +3959,13 @@ func testSSES3EncryptedGetObjectReadSeekFunctional() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. 
- c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -2873,14 +3973,14 @@ func testSSES3EncryptedGetObjectReadSeekFunctional() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -2894,9 +3994,9 @@ func testSSES3EncryptedGetObjectReadSeekFunctional() { } }() - // Generate 65MiB of data. - bufSize := dataFileMap["datafile-65-MB"] - var reader = getDataReader("datafile-65-MB") + // Generate 129MiB of data. 
+ bufSize := dataFileMap["datafile-129-MB"] + var reader = getDataReader("datafile-129-MB") defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -2909,7 +4009,7 @@ func testSSES3EncryptedGetObjectReadSeekFunctional() { } // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ ContentType: "binary/octet-stream", ServerSideEncryption: encrypt.NewSSE(), }) @@ -2918,13 +4018,8 @@ func testSSES3EncryptedGetObjectReadSeekFunctional() { return } - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) - return - } - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -3044,14 +4139,13 @@ func testSSECEncryptedGetObjectReadAtFunctional() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -3059,22 +4153,22 @@ func testSSECEncryptedGetObjectReadAtFunctional() { // c.TraceOn(os.Stderr) // Set user agent. 
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } - // Generate 65MiB of data. - bufSize := dataFileMap["datafile-65-MB"] - var reader = getDataReader("datafile-65-MB") + // Generate 129MiB of data. + bufSize := dataFileMap["datafile-129-MB"] + var reader = getDataReader("datafile-129-MB") defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -3087,7 +4181,7 @@ func testSSECEncryptedGetObjectReadAtFunctional() { } // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ ContentType: "binary/octet-stream", ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), }) @@ -3096,13 +4190,8 @@ func testSSECEncryptedGetObjectReadAtFunctional() { return } - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) - return - } - // read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), }) if err != nil { @@ -3188,7 +4277,7 @@ func 
testSSECEncryptedGetObjectReadAtFunctional() { return } - buf5 := make([]byte, n) + buf5 := make([]byte, len(buf)) // Read the whole object. m, err = r.ReadAt(buf5, 0) if err != nil { @@ -3206,7 +4295,7 @@ func testSSECEncryptedGetObjectReadAtFunctional() { return } - buf6 := make([]byte, n+1) + buf6 := make([]byte, len(buf)+1) // Read the whole object and beyond. _, err = r.ReadAt(buf6, 0) if err != nil { @@ -3235,14 +4324,13 @@ func testSSES3EncryptedGetObjectReadAtFunctional() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -3250,22 +4338,22 @@ func testSSES3EncryptedGetObjectReadAtFunctional() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } - // Generate 65MiB of data. - bufSize := dataFileMap["datafile-65-MB"] - var reader = getDataReader("datafile-65-MB") + // Generate 129MiB of data. 
+ bufSize := dataFileMap["datafile-129-MB"] + var reader = getDataReader("datafile-129-MB") defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -3278,7 +4366,7 @@ func testSSES3EncryptedGetObjectReadAtFunctional() { } // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ ContentType: "binary/octet-stream", ServerSideEncryption: encrypt.NewSSE(), }) @@ -3287,13 +4375,8 @@ func testSSES3EncryptedGetObjectReadAtFunctional() { return } - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) - return - } - // read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return @@ -3377,7 +4460,7 @@ func testSSES3EncryptedGetObjectReadAtFunctional() { return } - buf5 := make([]byte, n) + buf5 := make([]byte, len(buf)) // Read the whole object. m, err = r.ReadAt(buf5, 0) if err != nil { @@ -3395,7 +4478,7 @@ func testSSES3EncryptedGetObjectReadAtFunctional() { return } - buf6 := make([]byte, n+1) + buf6 := make([]byte, len(buf)+1) // Read the whole object and beyond. 
_, err = r.ReadAt(buf6, 0) if err != nil { @@ -3427,14 +4510,13 @@ func testSSECEncryptionPutGet() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -3442,14 +4524,14 @@ func testSSECEncryptionPutGet() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -3482,14 +4564,14 @@ func testSSECEncryptionPutGet() { args["sse"] = sse // Put encrypted data - _, err = c.PutObject(bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) if err != nil { logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) return } // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) if err != nil { logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) return @@ -3541,14 +4623,13 @@ func testSSECEncryptionFPut() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -3556,14 +4637,14 @@ func testSSECEncryptionFPut() { // c.TraceOn(os.Stderr) // Set user agent. 
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -3613,13 +4694,13 @@ func testSSECEncryptionFPut() { } file.Close() // Put encrypted data - if _, err = c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { + if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) return } // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) if err != nil { logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) return @@ -3641,10 +4722,7 @@ func testSSECEncryptionFPut() { return } - if err = os.Remove(fileName); err != nil { - logError(testName, function, args, startTime, "", "File remove failed", err) - return - } + os.Remove(fileName) } // Delete all objects and buckets @@ -3671,14 +4749,13 @@ func testSSES3EncryptionPutGet() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), 
os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -3686,14 +4763,14 @@ func testSSES3EncryptionPutGet() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -3724,14 +4801,14 @@ func testSSES3EncryptionPutGet() { args["sse"] = sse // Put encrypted data - _, err = c.PutObject(bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) if err != nil { logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) return } // Read the data back without any encryption headers - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) return @@ -3783,14 +4860,13 @@ func testSSES3EncryptionFPut() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), 
- os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -3798,14 +4874,14 @@ func testSSES3EncryptionFPut() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -3854,13 +4930,13 @@ func testSSES3EncryptionFPut() { } file.Close() // Put encrypted data - if _, err = c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { + if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) return } // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) return @@ -3882,10 +4958,7 @@ func testSSES3EncryptionFPut() { return } - if err = os.Remove(fileName); err != nil { - 
logError(testName, function, args, startTime, "", "File remove failed", err) - return - } + os.Remove(fileName) } // Delete all objects and buckets @@ -3918,14 +4991,13 @@ func testBucketNotification() { // Seed random based on current time. rand.Seed(time.Now().Unix()) - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -3933,61 +5005,60 @@ func testBucketNotification() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") bucketName := os.Getenv("NOTIFY_BUCKET") args["bucketName"] = bucketName - topicArn := minio.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE")) - queueArn := minio.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource") + topicArn := notification.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE")) + queueArn := notification.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource") - topicConfig := minio.NewNotificationConfig(topicArn) - - topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll) + topicConfig := notification.NewConfig(topicArn) + topicConfig.AddEvents(notification.ObjectCreatedAll, notification.ObjectRemovedAll) topicConfig.AddFilterSuffix("jpg") - queueConfig := minio.NewNotificationConfig(queueArn) - 
queueConfig.AddEvents(minio.ObjectCreatedAll) + queueConfig := notification.NewConfig(queueArn) + queueConfig.AddEvents(notification.ObjectCreatedAll) queueConfig.AddFilterPrefix("photos/") - bNotification := minio.BucketNotification{} - bNotification.AddTopic(topicConfig) + config := notification.Configuration{} + config.AddTopic(topicConfig) // Add the same topicConfig again, should have no effect // because it is duplicated - bNotification.AddTopic(topicConfig) - if len(bNotification.TopicConfigs) != 1 { + config.AddTopic(topicConfig) + if len(config.TopicConfigs) != 1 { logError(testName, function, args, startTime, "", "Duplicate entry added", err) return } // Add and remove a queue config - bNotification.AddQueue(queueConfig) - bNotification.RemoveQueueByArn(queueArn) + config.AddQueue(queueConfig) + config.RemoveQueueByArn(queueArn) - err = c.SetBucketNotification(bucketName, bNotification) + err = c.SetBucketNotification(context.Background(), bucketName, config) if err != nil { logError(testName, function, args, startTime, "", "SetBucketNotification failed", err) return } - bNotification, err = c.GetBucketNotification(bucketName) + config, err = c.GetBucketNotification(context.Background(), bucketName) if err != nil { logError(testName, function, args, startTime, "", "GetBucketNotification failed", err) return } - if len(bNotification.TopicConfigs) != 1 { + if len(config.TopicConfigs) != 1 { logError(testName, function, args, startTime, "", "Topic config is empty", err) return } - if bNotification.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" { + if config.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" { logError(testName, function, args, startTime, "", "Couldn't get the suffix", err) return } - err = c.RemoveAllBucketNotification(bucketName) + err = c.RemoveAllBucketNotification(context.Background(), bucketName) if err != nil { logError(testName, function, args, startTime, "", "RemoveAllBucketNotification failed", err) return @@ 
-4014,14 +5085,13 @@ func testFunctional() { // Seed random based on current time. rand.Seed(time.Now().Unix()) - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, nil, startTime, "", "Minio client object creation failed", err) + logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err) return } @@ -4029,7 +5099,7 @@ func testFunctional() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -4038,7 +5108,7 @@ func testFunctional() { function = "MakeBucket(bucketName, region)" functionAll = "MakeBucket(bucketName, region)" args["bucketName"] = bucketName - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) @@ -4069,7 +5139,7 @@ func testFunctional() { args = map[string]interface{}{ "bucketName": bucketName, } - exists, err = c.BucketExists(bucketName) + exists, err = c.BucketExists(context.Background(), bucketName) if err != nil { logError(testName, function, args, startTime, "", "BucketExists failed", err) @@ -4081,12 +5151,12 @@ func testFunctional() { } // Asserting the default bucket policy. 
- function = "GetBucketPolicy(bucketName)" + function = "GetBucketPolicy(ctx, bucketName)" functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, } - nilPolicy, err := c.GetBucketPolicy(bucketName) + nilPolicy, err := c.GetBucketPolicy(context.Background(), bucketName) if err != nil { logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) return @@ -4106,18 +5176,18 @@ func testFunctional() { "bucketPolicy": readOnlyPolicy, } - err = c.SetBucketPolicy(bucketName, readOnlyPolicy) + err = c.SetBucketPolicy(context.Background(), bucketName, readOnlyPolicy) if err != nil { logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) return } // should return policy `readonly`. - function = "GetBucketPolicy(bucketName)" + function = "GetBucketPolicy(ctx, bucketName)" functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, } - _, err = c.GetBucketPolicy(bucketName) + _, err = c.GetBucketPolicy(context.Background(), bucketName) if err != nil { logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) return @@ -4132,20 +5202,20 @@ func testFunctional() { "bucketName": bucketName, "bucketPolicy": writeOnlyPolicy, } - err = c.SetBucketPolicy(bucketName, writeOnlyPolicy) + err = c.SetBucketPolicy(context.Background(), bucketName, writeOnlyPolicy) if err != nil { logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) return } // should return policy `writeonly`. 
- function = "GetBucketPolicy(bucketName)" + function = "GetBucketPolicy(ctx, bucketName)" functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, } - _, err = c.GetBucketPolicy(bucketName) + _, err = c.GetBucketPolicy(context.Background(), bucketName) if err != nil { logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) return @@ -4161,7 +5231,7 @@ func testFunctional() { "bucketName": bucketName, "bucketPolicy": readWritePolicy, } - err = c.SetBucketPolicy(bucketName, readWritePolicy) + err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy) if err != nil { logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) @@ -4173,7 +5243,7 @@ func testFunctional() { args = map[string]interface{}{ "bucketName": bucketName, } - _, err = c.GetBucketPolicy(bucketName) + _, err = c.GetBucketPolicy(context.Background(), bucketName) if err != nil { logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) return @@ -4183,7 +5253,7 @@ func testFunctional() { function = "ListBuckets()" functionAll += ", " + function args = nil - buckets, err := c.ListBuckets() + buckets, err := c.ListBuckets(context.Background()) if len(buckets) == 0 { logError(testName, function, args, startTime, "", "Found bucket list to be empty", err) @@ -4221,34 +5291,24 @@ func testFunctional() { "contentType": "", } - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - if n != int64(len(buf)) { - logError(testName, function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err) - return - } - args = map[string]interface{}{ "bucketName": 
bucketName, "objectName": objectName + "-nolength", "contentType": "binary/octet-stream", } - n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(context.Background(), bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - if n != int64(len(buf)) { - logError(testName, function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err) - return - } - // Instantiate a done channel to close all listing. doneCh := make(chan struct{}) defer close(doneCh) @@ -4264,7 +5324,7 @@ func testFunctional() { "isRecursive": isRecursive, } - for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) { + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: true}) { if obj.Key == objectName { objFound = true break @@ -4277,7 +5337,7 @@ func testFunctional() { objFound = false isRecursive = true // Recursive is true. 
- function = "ListObjectsV2(bucketName, objectName, isRecursive, doneCh)" + function = "ListObjects()" functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, @@ -4285,7 +5345,7 @@ func testFunctional() { "isRecursive": isRecursive, } - for obj := range c.ListObjectsV2(bucketName, objectName, isRecursive, doneCh) { + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Prefix: objectName, Recursive: isRecursive}) { if obj.Key == objectName { objFound = true break @@ -4306,7 +5366,7 @@ func testFunctional() { "isRecursive": isRecursive, } - for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) { + for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) { if objIncompl.Key != "" { incompObjNotFound = false break @@ -4323,7 +5383,7 @@ func testFunctional() { "bucketName": bucketName, "objectName": objectName, } - newReader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) @@ -4349,7 +5409,7 @@ func testFunctional() { "objectName": objectName, "fileName": fileName + "-f", } - err = c.FGetObject(bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "FGetObject failed", err) @@ -4363,7 +5423,7 @@ func testFunctional() { "objectName": "", "expires": 3600 * time.Second, } - if _, err = c.PresignedHeadObject(bucketName, "", 3600*time.Second, nil); err == nil { + if _, err = c.PresignedHeadObject(context.Background(), bucketName, "", 3600*time.Second, nil); err == nil { logError(testName, function, args, startTime, "", 
"PresignedHeadObject success", err) return } @@ -4376,14 +5436,35 @@ func testFunctional() { "objectName": objectName, "expires": 3600 * time.Second, } - presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil) + presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) if err != nil { logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) return } + + transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) + if err != nil { + logError(testName, function, args, startTime, "", "DefaultTransport failed", err) + return + } + + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively canceled after 30secs + // with no response. + Timeout: 30 * time.Second, + Transport: transport, + } + + req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject request was incorrect", err) + return + } + // Verify if presigned url works. 
- resp, err := http.Head(presignedHeadURL.String()) + resp, err := httpClient.Do(req) if err != nil { logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) return @@ -4405,7 +5486,7 @@ func testFunctional() { "objectName": "", "expires": 3600 * time.Second, } - _, err = c.PresignedGetObject(bucketName, "", 3600*time.Second, nil) + _, err = c.PresignedGetObject(context.Background(), bucketName, "", 3600*time.Second, nil) if err == nil { logError(testName, function, args, startTime, "", "PresignedGetObject success", err) return @@ -4419,7 +5500,7 @@ func testFunctional() { "objectName": objectName, "expires": 3600 * time.Second, } - presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil) + presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) if err != nil { logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) @@ -4427,7 +5508,13 @@ func testFunctional() { } // Verify if presigned url works. - resp, err = http.Get(presignedGetURL.String()) + req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) + return + } + + resp, err = httpClient.Do(req) if err != nil { logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) return @@ -4456,14 +5543,21 @@ func testFunctional() { "expires": 3600 * time.Second, "reqParams": reqParams, } - presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams) + presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams) if err != nil { logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) return } + // Verify if presigned url works. 
- resp, err = http.Get(presignedGetURL.String()) + req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) + return + } + + resp, err = httpClient.Do(req) if err != nil { logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) return @@ -4493,7 +5587,7 @@ func testFunctional() { "objectName": "", "expires": 3600 * time.Second, } - _, err = c.PresignedPutObject(bucketName, "", 3600*time.Second) + _, err = c.PresignedPutObject(context.Background(), bucketName, "", 3600*time.Second) if err == nil { logError(testName, function, args, startTime, "", "PresignedPutObject success", err) return @@ -4506,7 +5600,7 @@ func testFunctional() { "objectName": objectName + "-presigned", "expires": 3600 * time.Second, } - presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second) + presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second) if err != nil { logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) @@ -4515,25 +5609,19 @@ func testFunctional() { buf = bytes.Repeat([]byte("g"), 1<<19) - req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf)) + req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf)) if err != nil { logError(testName, function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err) return } - httpClient := &http.Client{ - // Setting a sensible time out of 30secs to wait for response - // headers. Request is pro-actively cancelled after 30secs - // with no response. 
- Timeout: 30 * time.Second, - Transport: http.DefaultTransport, - } + resp, err = httpClient.Do(req) if err != nil { logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) return } - newReader, err = c.GetObject(bucketName, objectName+"-presigned", minio.GetObjectOptions{}) + newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObject after PresignedPutObject failed", err) return @@ -4556,14 +5644,14 @@ func testFunctional() { "bucketName": bucketName, "objectName": objectName, } - err = c.RemoveObject(bucketName, objectName) + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "RemoveObject failed", err) return } args["objectName"] = objectName + "-f" - err = c.RemoveObject(bucketName, objectName+"-f") + err = c.RemoveObject(context.Background(), bucketName, objectName+"-f", minio.RemoveObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "RemoveObject failed", err) @@ -4571,7 +5659,7 @@ func testFunctional() { } args["objectName"] = objectName + "-nolength" - err = c.RemoveObject(bucketName, objectName+"-nolength") + err = c.RemoveObject(context.Background(), bucketName, objectName+"-nolength", minio.RemoveObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "RemoveObject failed", err) @@ -4579,7 +5667,7 @@ func testFunctional() { } args["objectName"] = objectName + "-presigned" - err = c.RemoveObject(bucketName, objectName+"-presigned") + err = c.RemoveObject(context.Background(), bucketName, objectName+"-presigned", minio.RemoveObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "RemoveObject failed", err) @@ -4591,13 +5679,13 @@ func testFunctional() { args = map[string]interface{}{ "bucketName": 
bucketName, } - err = c.RemoveBucket(bucketName) + err = c.RemoveBucket(context.Background(), bucketName) if err != nil { logError(testName, function, args, startTime, "", "RemoveBucket failed", err) return } - err = c.RemoveBucket(bucketName) + err = c.RemoveBucket(context.Background(), bucketName) if err == nil { logError(testName, function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err) return @@ -4607,14 +5695,8 @@ func testFunctional() { return } - if err = os.Remove(fileName); err != nil { - logError(testName, function, args, startTime, "", "File Remove failed", err) - return - } - if err = os.Remove(fileName + "-f"); err != nil { - logError(testName, function, args, startTime, "", "File Remove failed", err) - return - } + os.Remove(fileName) + os.Remove(fileName + "-f") successLogger(testName, functionAll, args, startTime).Info() } @@ -4628,15 +5710,13 @@ func testGetObjectModified() { args := map[string]interface{}{} // Instantiate new minio client object. - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -4644,32 +5724,32 @@ func testGetObjectModified() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Make a new bucket. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } - defer c.RemoveBucket(bucketName) + defer c.RemoveBucket(context.Background(), bucketName) // Upload an object. objectName := "myobject" args["objectName"] = objectName content := "helloworld" - _, err = c.PutObject(bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"}) + _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"}) if err != nil { logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) return } - defer c.RemoveObject(bucketName, objectName) + defer c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) - reader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err) return @@ -4686,7 +5766,7 @@ func testGetObjectModified() { // Upload different contents to the same object while object is being read. 
newContent := "goodbyeworld" - _, err = c.PutObject(bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"}) + _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"}) if err != nil { logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) return @@ -4729,14 +5809,13 @@ func testPutObjectUploadSeekedObject() { } // Instantiate new minio client object. - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -4744,18 +5823,18 @@ func testPutObjectUploadSeekedObject() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Make a new bucket. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } - defer c.RemoveBucket(bucketName) + defer c.RemoveBucket(context.Background(), bucketName) var tempfile *os.File @@ -4795,25 +5874,21 @@ func testPutObjectUploadSeekedObject() { return } - n, err := c.PutObject(bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(context.Background(), bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - if n != int64(length-offset) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid length returned, expected %d got %d", int64(length-offset), n), err) - return - } tempfile.Close() - obj, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + obj, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return } defer obj.Close() - n, err = obj.Seek(int64(offset), 0) + n, err := obj.Seek(int64(offset), 0) if err != nil { logError(testName, function, args, startTime, "", "Seek failed", err) return @@ -4823,12 +5898,17 @@ func testPutObjectUploadSeekedObject() { return } - n, err = c.PutObject(bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(context.Background(), bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: 
"binary/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - if n != int64(length-offset) { + st, err := c.StatObject(context.Background(), bucketName, objectName+"getobject", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if st.Size != int64(length-offset) { logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(length-offset), n), err) return } @@ -4862,14 +5942,13 @@ func testMakeBucketErrorV2() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } @@ -4877,7 +5956,7 @@ func testMakeBucketErrorV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -4886,11 +5965,11 @@ func testMakeBucketErrorV2() { args["region"] = region // Make a new bucket in 'eu-west-1'. 
- if err = c.MakeBucket(bucketName, region); err != nil { + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } - if err = c.MakeBucket(bucketName, region); err == nil { + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil { logError(testName, function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err) return } @@ -4923,14 +6002,13 @@ func testGetObjectClosedTwiceV2() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } @@ -4938,14 +6016,14 @@ func testGetObjectClosedTwiceV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -4960,19 +6038,14 @@ func testGetObjectClosedTwiceV2() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(n), err) - return - } - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -5023,14 +6096,13 @@ func testFPutObjectV2() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. 
- c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } @@ -5038,14 +6110,14 @@ func testFPutObjectV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -5082,33 +6154,25 @@ func testFPutObjectV2() { args["fileName"] = file.Name() // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) - n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) + _, err = c.FPutObject(context.Background(), bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "FPutObject failed", err) return } - if n != int64(11*1024*1024) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) - return - } // Perform FPutObject with no 
contentType provided (Expecting application/octet-stream) args["objectName"] = objectName + "-Octet" args["contentType"] = "" - n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{}) + _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "FPutObject failed", err) return } - if n != int64(11*1024*1024) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) - return - } // Add extension to temp file name fileName := file.Name() - err = os.Rename(file.Name(), fileName+".gtar") + err = os.Rename(fileName, fileName+".gtar") if err != nil { logError(testName, function, args, startTime, "", "Rename failed", err) return @@ -5119,28 +6183,30 @@ func testFPutObjectV2() { args["contentType"] = "" args["fileName"] = fileName + ".gtar" - n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{}) + _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "FPutObject failed", err) return } - if n != int64(11*1024*1024) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) - return - } - // Check headers - rStandard, err := c.StatObject(bucketName, objectName+"-standard", minio.StatObjectOptions{}) + // Check headers and sizes + rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "StatObject failed", err) return } + + if rStandard.Size != 11*1024*1024 { + logError(testName, function, args, startTime, "", "Unexpected size", nil) + return 
+ } + if rStandard.ContentType != "application/octet-stream" { logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err) return } - rOctet, err := c.StatObject(bucketName, objectName+"-Octet", minio.StatObjectOptions{}) + rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "StatObject failed", err) return @@ -5150,11 +6216,20 @@ func testFPutObjectV2() { return } - rGTar, err := c.StatObject(bucketName, objectName+"-GTar", minio.StatObjectOptions{}) + if rOctet.Size != 11*1024*1024 { + logError(testName, function, args, startTime, "", "Unexpected size", nil) + return + } + + rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "StatObject failed", err) return } + if rGTar.Size != 11*1024*1024 { + logError(testName, function, args, startTime, "", "Unexpected size", nil) + return + } if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" { logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err) return @@ -5166,11 +6241,7 @@ func testFPutObjectV2() { return } - err = os.Remove(fileName + ".gtar") - if err != nil { - logError(testName, function, args, startTime, "", "File remove failed", err) - return - } + os.Remove(fileName + ".gtar") successLogger(testName, function, args, startTime).Info() } @@ -5194,14 +6265,13 @@ func testMakeBucketRegionsV2() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. 
- c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } @@ -5209,14 +6279,14 @@ func testMakeBucketRegionsV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket in 'eu-central-1'. - if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil { + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "eu-west-1"}); err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } @@ -5229,7 +6299,7 @@ func testMakeBucketRegionsV2() { // Make a new bucket with '.' in its name, in 'us-west-2'. This // request is internally staged into a path style instead of // virtual host style. - if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil { + if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: "us-west-2"}); err != nil { args["bucketName"] = bucketName + ".withperiod" args["region"] = "us-west-2" logError(testName, function, args, startTime, "", "MakeBucket failed", err) @@ -5257,14 +6327,13 @@ func testGetObjectReadSeekFunctionalV2() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. 
- c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } @@ -5272,14 +6341,14 @@ func testGetObjectReadSeekFunctionalV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -5300,19 +6369,14 @@ func testGetObjectReadSeekFunctionalV2() { } // Save the data. 
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err) - return - } - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -5331,7 +6395,7 @@ func testGetObjectReadSeekFunctionalV2() { } offset := int64(2048) - n, err = r.Seek(offset, 0) + n, err := r.Seek(offset, 0) if err != nil { logError(testName, function, args, startTime, "", "Seek failed", err) return @@ -5421,14 +6485,13 @@ func testGetObjectReadAtFunctionalV2() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } @@ -5436,14 +6499,14 @@ func testGetObjectReadAtFunctionalV2() { // c.TraceOn(os.Stderr) // Set user agent. 
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -5464,19 +6527,14 @@ func testGetObjectReadAtFunctionalV2() { } // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(n), err) - return - } - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -5543,7 +6601,7 @@ func testGetObjectReadAtFunctionalV2() { return } - buf5 := make([]byte, n) + buf5 := make([]byte, bufSize) // Read the whole object. m, err = r.ReadAt(buf5, 0) if err != nil { @@ -5561,7 +6619,7 @@ func testGetObjectReadAtFunctionalV2() { return } - buf6 := make([]byte, n+1) + buf6 := make([]byte, bufSize+1) // Read the whole object and beyond. 
_, err = r.ReadAt(buf6, 0) if err != nil { @@ -5591,14 +6649,13 @@ func testCopyObjectV2() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } @@ -5606,20 +6663,20 @@ func testCopyObjectV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } // Make a new bucket in 'us-east-1' (destination bucket). 
- err = c.MakeBucket(bucketName+"-copy", "us-east-1") + err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -5632,18 +6689,13 @@ func testCopyObjectV2() { // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err) - return - } - - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -5657,66 +6709,36 @@ func testCopyObjectV2() { r.Close() // Copy Source - src := minio.NewSourceInfo(bucketName, objectName, nil) + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + MatchETag: objInfo.ETag, + } args["source"] = src // Set copy conditions. - - // All invalid conditions first. 
- err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) - if err == nil { - logError(testName, function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err) - return - } - err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) - if err == nil { - logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err) - return - } - err = src.SetMatchETagCond("") - if err == nil { - logError(testName, function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err) - return - } - err = src.SetMatchETagExceptCond("") - if err == nil { - logError(testName, function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err) - return - } - - err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) - if err != nil { - logError(testName, function, args, startTime, "", "SetModifiedSinceCond failed", err) - return - } - err = src.SetMatchETagCond(objInfo.ETag) - if err != nil { - logError(testName, function, args, startTime, "", "SetMatchETagCond failed", err) - return + dst := minio.CopyDestOptions{ + Bucket: bucketName + "-copy", + Object: objectName + "-copy", } - - dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil) args["destination"] = dst - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } // Perform the Copy - err = c.CopyObject(dst, src) + _, err = c.CopyObject(context.Background(), dst, src) if err != nil { logError(testName, function, args, startTime, "", "CopyObject failed", err) return } // Source object - r, err = c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", 
"GetObject failed", err) return } // Destination object - readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{}) + readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -5742,20 +6764,15 @@ func testCopyObjectV2() { readerCopy.Close() // CopyObject again but with wrong conditions - src = minio.NewSourceInfo(bucketName, objectName, nil) - err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) - if err != nil { - logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond failed", err) - return - } - err = src.SetMatchETagExceptCond(objInfo.ETag) - if err != nil { - logError(testName, function, args, startTime, "", "SetMatchETagExceptCond failed", err) - return + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + NoMatchETag: objInfo.ETag, } // Perform the Copy which should fail - err = c.CopyObject(dst, src) + _, err = c.CopyObject(context.Background(), dst, src) if err == nil { logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) return @@ -5784,7 +6801,7 @@ func testComposeObjectErrorCasesWrapper(c *minio.Client) { bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") // Make a new bucket in 'us-east-1' (source bucket). - err := c.MakeBucket(bucketName, "us-east-1") + err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) @@ -5793,19 +6810,18 @@ func testComposeObjectErrorCasesWrapper(c *minio.Client) { // Test that more than 10K source objects cannot be // concatenated. 
- srcArr := [10001]minio.SourceInfo{} + srcArr := [10001]minio.CopySrcOptions{} srcSlice := srcArr[:] - dst, err := minio.NewDestinationInfo(bucketName, "object", nil, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "object", } args["destination"] = dst // Just explain about srcArr in args["sourceList"] // to stop having 10,001 null headers logged args["sourceList"] = "source array of 10,001 elements" - if err := c.ComposeObject(dst, srcSlice); err == nil { + if _, err := c.ComposeObject(context.Background(), dst, srcSlice...); err == nil { logError(testName, function, args, startTime, "", "Expected error in ComposeObject", err) return } else if err.Error() != "There must be as least one and up to 10000 source objects." { @@ -5818,21 +6834,23 @@ func testComposeObjectErrorCasesWrapper(c *minio.Client) { // 1. Create the source object. const badSrcSize = 5 * 1024 * 1024 buf := bytes.Repeat([]byte("1"), badSrcSize) - _, err = c.PutObject(bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + _, err = c.PutObject(context.Background(), bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } // 2. Set invalid range spec on the object (going beyond // object size) - badSrc := minio.NewSourceInfo(bucketName, "badObject", nil) - err = badSrc.SetRange(1, badSrcSize) - if err != nil { - logError(testName, function, args, startTime, "", "Setting NewSourceInfo failed", err) - return + badSrc := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "badObject", + MatchRange: true, + Start: 1, + End: badSrcSize, } + // 3. ComposeObject call should fail. 
- if err := c.ComposeObject(dst, []minio.SourceInfo{badSrc}); err == nil { + if _, err := c.ComposeObject(context.Background(), dst, badSrc); err == nil { logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err) return } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") { @@ -5858,14 +6876,13 @@ func testComposeObjectErrorCasesV2() { args := map[string]interface{}{} // Instantiate new minio client object - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } @@ -5885,7 +6902,7 @@ func testComposeMultipleSources(c *minio.Client) { // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") // Make a new bucket in 'us-east-1' (source bucket). 
- err := c.MakeBucket(bucketName, "us-east-1") + err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -5894,39 +6911,44 @@ func testComposeMultipleSources(c *minio.Client) { // Upload a small source object const srcSize = 1024 * 1024 * 5 buf := bytes.Repeat([]byte("1"), srcSize) - _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } // We will append 10 copies of the object. - srcs := []minio.SourceInfo{} + srcs := []minio.CopySrcOptions{} for i := 0; i < 10; i++ { - srcs = append(srcs, minio.NewSourceInfo(bucketName, "srcObject", nil)) + srcs = append(srcs, minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObject", + }) } + // make the last part very small - err = srcs[9].SetRange(0, 0) - if err != nil { - logError(testName, function, args, startTime, "", "SetRange failed", err) - return - } + srcs[9].MatchRange = true + args["sourceList"] = srcs - dst, err := minio.NewDestinationInfo(bucketName, "dstObject", nil, nil) + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject", + } args["destination"] = dst + ui, err := c.ComposeObject(context.Background(), dst, srcs...) 
if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) + logError(testName, function, args, startTime, "", "ComposeObject failed", err) return } - err = c.ComposeObject(dst, srcs) - if err != nil { - logError(testName, function, args, startTime, "", "ComposeObject failed", err) + + if ui.Size != 9*srcSize+1 { + logError(testName, function, args, startTime, "", "ComposeObject returned unexpected size", err) return } - objProps, err := c.StatObject(bucketName, "dstObject", minio.StatObjectOptions{}) + objProps, err := c.StatObject(context.Background(), bucketName, "dstObject", minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "StatObject failed", err) return @@ -5953,14 +6975,13 @@ func testCompose10KSourcesV2() { args := map[string]interface{}{} // Instantiate new minio client object - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } @@ -5975,14 +6996,13 @@ func testEncryptedEmptyObject() { args := map[string]interface{}{} // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, 
startTime, "", "Minio v4 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } @@ -5990,7 +7010,7 @@ func testEncryptedEmptyObject() { bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -6002,46 +7022,53 @@ func testEncryptedEmptyObject() { const srcSize = 0 var buf []byte // Empty buffer args["objectName"] = "object" - _, err = c.PutObject(bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) + _, err = c.PutObject(context.Background(), bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) if err != nil { logError(testName, function, args, startTime, "", "PutObject call failed", err) return } // 2. 
Test CopyObject for an empty object - dstInfo, err := minio.NewDestinationInfo(bucketName, "new-object", sse, nil) - if err != nil { - args["objectName"] = "new-object" - function = "NewDestinationInfo(bucketName, objectName, sse, userMetadata)" - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "object", + Encryption: sse, + } + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "new-object", + Encryption: sse, } - srcInfo := minio.NewSourceInfo(bucketName, "object", sse) - if err = c.CopyObject(dstInfo, srcInfo); err != nil { - function = "CopyObject(dstInfo, srcInfo)" + + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + function = "CopyObject(dst, src)" logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject failed", err) return } // 3. Test Key rotation newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"new-object")) - dstInfo, err = minio.NewDestinationInfo(bucketName, "new-object", newSSE, nil) - if err != nil { - args["objectName"] = "new-object" - function = "NewDestinationInfo(bucketName, objectName, encryptSSEC, userMetadata)" - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: "new-object", + Encryption: sse, + } + + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "new-object", + Encryption: newSSE, } - srcInfo = minio.NewSourceInfo(bucketName, "new-object", sse) - if err = c.CopyObject(dstInfo, srcInfo); err != nil { - function = "CopyObject(dstInfo, srcInfo)" + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + function = "CopyObject(dst, src)" logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject with key rotation failed", err) return } // 4. Download the object. 
- reader, err := c.GetObject(bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE}) + reader, err := c.GetObject(context.Background(), bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -6070,13 +7097,13 @@ func testEncryptedEmptyObject() { func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, sseDst encrypt.ServerSide) { // initialize logging params startTime := time.Now() - testName := getFuncName() + testName := getFuncNameLoc(2) function := "CopyObject(destination, source)" args := map[string]interface{}{} var srcEncryption, dstEncryption encrypt.ServerSide // Make a new bucket in 'us-east-1' (source bucket). - err := c.MakeBucket(bucketName, "us-east-1") + err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -6085,7 +7112,7 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, // 1. create an sse-c encrypted object to copy by uploading const srcSize = 1024 * 1024 buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB - _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ ServerSideEncryption: sseSrc, }) if err != nil { @@ -6098,16 +7125,21 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, } // 2. 
copy object and change encryption key - src := minio.NewSourceInfo(bucketName, "srcObject", srcEncryption) + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObject", + Encryption: srcEncryption, + } args["source"] = src - dst, err := minio.NewDestinationInfo(bucketName, "dstObject", sseDst, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject", + Encryption: sseDst, } args["destination"] = dst - err = c.CopyObject(dst, src) + _, err = c.CopyObject(context.Background(), dst, src) if err != nil { logError(testName, function, args, startTime, "", "CopyObject failed", err) return @@ -6118,7 +7150,7 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, } // 3. get copied object and check if content is equal coreClient := minio.Core{c} - reader, _, err := coreClient.GetObject(bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption}) + reader, _, _, err := coreClient.GetObject(context.Background(), bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -6144,21 +7176,21 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, newSSE = encrypt.NewSSE() } if newSSE != nil { - dst, err = minio.NewDestinationInfo(bucketName, "srcObject", newSSE, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "srcObject", + Encryption: newSSE, } args["destination"] = dst - err = c.CopyObject(dst, src) + _, err = c.CopyObject(context.Background(), dst, src) if err != nil { logError(testName, function, args, startTime, "", "CopyObject failed", err) return } // Get copied object and check if 
content is equal - reader, _, err = coreClient.GetObject(bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE}) + reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -6174,17 +7206,21 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, return } reader.Close() + // Test in-place decryption. - dst, err = minio.NewDestinationInfo(bucketName, "srcObject", nil, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "srcObject", } args["destination"] = dst - src = minio.NewSourceInfo(bucketName, "srcObject", newSSE) + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObject", + Encryption: newSSE, + } args["source"] = src - err = c.CopyObject(dst, src) + _, err = c.CopyObject(context.Background(), dst, src) if err != nil { logError(testName, function, args, startTime, "", "CopyObject Key rotation failed", err) return @@ -6192,7 +7228,7 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, } // Get copied decrypted object and check if content is equal - reader, _, err = coreClient.GetObject(bucketName, "srcObject", minio.GetObjectOptions{}) + reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -6227,23 +7263,21 @@ func testUnencryptedToSSECCopyObject() { args := map[string]interface{}{} // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := 
minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - var sseSrc encrypt.ServerSide sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) + testEncryptedCopyObjectWrapper(c, bucketName, nil, sseDst) } // Test encrypted copy object @@ -6255,14 +7289,13 @@ func testUnencryptedToSSES3CopyObject() { args := map[string]interface{}{} // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } // Generate a new random bucket name. 
@@ -6283,14 +7316,13 @@ func testUnencryptedToUnencryptedCopyObject() { args := map[string]interface{}{} // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } // Generate a new random bucket name. @@ -6310,14 +7342,13 @@ func testEncryptedSSECToSSECCopyObject() { args := map[string]interface{}{} // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } // Generate a new random bucket name. 
@@ -6338,14 +7369,13 @@ func testEncryptedSSECToSSES3CopyObject() { args := map[string]interface{}{} // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } // Generate a new random bucket name. @@ -6366,14 +7396,13 @@ func testEncryptedSSECToUnencryptedCopyObject() { args := map[string]interface{}{} // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } // Generate a new random bucket name. 
@@ -6394,14 +7423,13 @@ func testEncryptedSSES3ToSSECCopyObject() { args := map[string]interface{}{} // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } // Generate a new random bucket name. @@ -6422,14 +7450,13 @@ func testEncryptedSSES3ToSSES3CopyObject() { args := map[string]interface{}{} // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } // Generate a new random bucket name. 
@@ -6450,14 +7477,13 @@ func testEncryptedSSES3ToUnencryptedCopyObject() { args := map[string]interface{}{} // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } // Generate a new random bucket name. @@ -6478,14 +7504,13 @@ func testEncryptedCopyObjectV2() { args := map[string]interface{}{} // Instantiate new minio client object - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } // Generate a new random bucket name. 
@@ -6505,25 +7530,24 @@ func testDecryptedCopyObject() { args := map[string]interface{}{} // Instantiate new minio client object - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } bucketName, objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-"), "object" - if err = c.MakeBucket(bucketName, "us-east-1"); err != nil { + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}); err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } encryption := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)) - _, err = c.PutObject(bucketName, objectName, bytes.NewReader(bytes.Repeat([]byte("a"), 1024*1024)), 1024*1024, minio.PutObjectOptions{ + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(bytes.Repeat([]byte("a"), 1024*1024)), 1024*1024, minio.PutObjectOptions{ ServerSideEncryption: encryption, }) if err != nil { @@ -6531,26 +7555,200 @@ func testDecryptedCopyObject() { return } - src := minio.NewSourceInfo(bucketName, objectName, encrypt.SSECopy(encryption)) + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + Encryption: encrypt.SSECopy(encryption), + } args["source"] = src - dst, err := minio.NewDestinationInfo(bucketName, "decrypted-"+objectName, nil, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo 
failed", err) - return + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "decrypted-" + objectName, } args["destination"] = dst - if err = c.CopyObject(dst, src); err != nil { + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { logError(testName, function, args, startTime, "", "CopyObject failed", err) return } - if _, err = c.GetObject(bucketName, "decrypted-"+objectName, minio.GetObjectOptions{}); err != nil { + if _, err = c.GetObject(context.Background(), bucketName, "decrypted-"+objectName, minio.GetObjectOptions{}); err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return } successLogger(testName, function, args, startTime).Info() } +func testSSECMultipartEncryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. 
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 6MB of data + buf := bytes.Repeat([]byte("abcdef"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + + // Upload a 6MB object using multipart mechanism + uploadID, err := c.NewMultipartUpload(context.Background(), bucketName, objectName, minio.PutObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + } + + var completeParts []minio.CompletePart + + part, err := c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, bytes.NewReader(buf[:5*1024*1024]), 5*1024*1024, "", "", srcencryption) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) + } + completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) + + part, err = c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 2, bytes.NewReader(buf[5*1024*1024:]), 1024*1024, "", "", srcencryption) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) + } + completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, completeParts) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + } + + // Stat the object and check its length matches + objInfo, err 
:= c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err = c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. + metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = objInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Complete the multipart upload + _, err = 
c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + } + + // Stat the object and check its length matches + objInfo, err = c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } + + if objInfo.Size != (6*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 6*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf := make([]byte, 6*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 6MB", err) + } + + getOpts.SetRange(6*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf = make([]byte, 6*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf[:6*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 6MB", err) + } + if getBuf[6*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected 
data in last byte of copied object!", err) + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. +} + // Test Core CopyObjectPart implementation func testSSECEncryptedToSSECCopyObjectPart() { // initialize logging params @@ -6560,14 +7758,13 @@ func testSSECEncryptedToSSECCopyObjectPart() { args := map[string]interface{}{} // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } @@ -6578,13 +7775,13 @@ func testSSECEncryptedToSSECCopyObjectPart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) } @@ -6596,23 +7793,32 @@ func testSSECEncryptedToSSECCopyObjectPart() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") password := "correct horse battery staple" srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ + putmetadata := map[string]string{ "Content-Type": "binary/octet-stream", - }, srcencryption) + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + ServerSideEncryption: srcencryption, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) if err != nil { logError(testName, function, args, startTime, "", "PutObject call failed", err) } - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) } destBucketName := bucketName destObjectName := objectName + "-dest" dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + uploadID, 
err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) if err != nil { logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) } @@ -6627,32 +7833,35 @@ func testSSECEncryptedToSSECCopyObjectPart() { for k, v := range header { metadata[k] = v[0] } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) if err != nil { logError(testName, function, args, startTime, "", 
"CompleteMultipartUpload call failed", err) } // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: dstencryption}}) + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) if err != nil { logError(testName, function, args, startTime, "", "StatObject call failed", err) } @@ -6664,12 +7873,12 @@ func testSSECEncryptedToSSECCopyObjectPart() { // Now we read the data back getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} getOpts.SetRange(0, 5*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) if err != nil { logError(testName, function, args, startTime, "", "GetObject call failed", err) } getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { logError(testName, function, args, startTime, "", "Read buffer failed", err) } @@ -6678,12 +7887,12 @@ func testSSECEncryptedToSSECCopyObjectPart() { } getOpts.SetRange(5*1024*1024, 0) - r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) if err != nil { logError(testName, function, args, startTime, "", "GetObject call failed", err) } getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { logError(testName, function, args, startTime, "", "Read buffer failed", err) } @@ -6708,14 +7917,13 @@ func testSSECEncryptedToUnencryptedCopyPart() { args := map[string]interface{}{} // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - 
mustParseBool(os.Getenv(enableHTTPS)), - ) + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } @@ -6726,13 +7934,13 @@ func testSSECEncryptedToUnencryptedCopyPart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) } @@ -6745,22 +7953,31 @@ func testSSECEncryptedToUnencryptedCopyPart() { password := "correct horse battery staple" srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, srcencryption) + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + ServerSideEncryption: srcencryption, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) if err != nil { logError(testName, function, args, startTime, "", "PutObject call failed", err) } - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, 
got %v\n", len(buf), objInfo.Size), err) + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) } destBucketName := bucketName destObjectName := objectName + "-dest" var dstencryption encrypt.ServerSide - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) if err != nil { logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) } @@ -6774,32 +7991,35 @@ func testSSECEncryptedToUnencryptedCopyPart() { for k, v := range header { metadata[k] = v[0] } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Last of three parts - lstPart, err := 
c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) if err != nil { logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) } // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "StatObject call failed", err) } @@ -6811,12 +8031,12 @@ func testSSECEncryptedToUnencryptedCopyPart() { // Now we read the data back getOpts := minio.GetObjectOptions{} getOpts.SetRange(0, 5*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) if err != nil { logError(testName, function, args, startTime, "", "GetObject call failed", err) } getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { logError(testName, function, args, startTime, "", "Read buffer failed", err) } @@ -6825,12 +8045,12 @@ func testSSECEncryptedToUnencryptedCopyPart() { } getOpts.SetRange(5*1024*1024, 0) - r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err = c.GetObject(context.Background(), 
destBucketName, destObjectName, getOpts) if err != nil { logError(testName, function, args, startTime, "", "GetObject call failed", err) } getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { logError(testName, function, args, startTime, "", "Read buffer failed", err) } @@ -6855,14 +8075,13 @@ func testSSECEncryptedToSSES3CopyObjectPart() { args := map[string]interface{}{} // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } @@ -6873,13 +8092,13 @@ func testSSECEncryptedToSSES3CopyObjectPart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) } @@ -6891,23 +8110,33 @@ func testSSECEncryptedToSSES3CopyObjectPart() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") password := "correct horse battery staple" srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ + putmetadata := map[string]string{ "Content-Type": "binary/octet-stream", - }, srcencryption) + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + ServerSideEncryption: srcencryption, + } + + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) if err != nil { logError(testName, function, args, startTime, "", "PutObject call failed", err) } - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) } destBucketName := bucketName destObjectName := objectName + "-dest" dstencryption := encrypt.NewSSE() - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + uploadID, err := c.NewMultipartUpload(context.Background(), 
destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) if err != nil { logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) } @@ -6923,32 +8152,35 @@ func testSSECEncryptedToSSES3CopyObjectPart() { for k, v := range header { metadata[k] = v[0] } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) if err != nil { logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) } // Stat the object and 
check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "StatObject call failed", err) } @@ -6960,12 +8192,12 @@ func testSSECEncryptedToSSES3CopyObjectPart() { // Now we read the data back getOpts := minio.GetObjectOptions{} getOpts.SetRange(0, 5*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) if err != nil { logError(testName, function, args, startTime, "", "GetObject call failed", err) } getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { logError(testName, function, args, startTime, "", "Read buffer failed", err) } @@ -6974,12 +8206,12 @@ func testSSECEncryptedToSSES3CopyObjectPart() { } getOpts.SetRange(5*1024*1024, 0) - r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) if err != nil { logError(testName, function, args, startTime, "", "GetObject call failed", err) } getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { logError(testName, function, args, startTime, "", "Read buffer failed", err) } @@ -7004,14 +8236,13 @@ func testUnencryptedToSSECCopyObjectPart() { args := map[string]interface{}{} // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: 
mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } @@ -7022,13 +8253,13 @@ func testUnencryptedToSSECCopyObjectPart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) } @@ -7039,23 +8270,31 @@ func testUnencryptedToSSECCopyObjectPart() { // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") password := "correct horse battery staple" - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ + putmetadata := map[string]string{ "Content-Type": "binary/octet-stream", - }, nil) + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) if err != nil { logError(testName, function, args, startTime, "", "PutObject call failed", err) } - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } + + if st.Size != 
int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) } destBucketName := bucketName destObjectName := objectName + "-dest" dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) if err != nil { logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) } @@ -7069,32 +8308,35 @@ func testUnencryptedToSSECCopyObjectPart() { for k, v := range header { metadata[k] = v[0] } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) if err != nil { 
logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) if err != nil { logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) } // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: dstencryption}}) + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) if err != nil { logError(testName, function, args, startTime, "", "StatObject call failed", err) } @@ -7106,12 +8348,12 @@ func testUnencryptedToSSECCopyObjectPart() { // Now we read the data back getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} getOpts.SetRange(0, 5*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) if err != nil { logError(testName, function, args, startTime, "", "GetObject call failed", err) } getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { logError(testName, function, args, startTime, "", "Read buffer failed", err) } @@ -7120,12 +8362,12 @@ func testUnencryptedToSSECCopyObjectPart() { } getOpts.SetRange(5*1024*1024, 0) - r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) if err != nil { logError(testName, function, args, startTime, "", "GetObject call failed", err) } getBuf = make([]byte, 
5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { logError(testName, function, args, startTime, "", "Read buffer failed", err) } @@ -7150,14 +8392,13 @@ func testUnencryptedToUnencryptedCopyPart() { args := map[string]interface{}{} // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } @@ -7168,13 +8409,13 @@ func testUnencryptedToUnencryptedCopyPart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) } @@ -7184,22 +8425,29 @@ func testUnencryptedToUnencryptedCopyPart() { // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ + putmetadata := map[string]string{ "Content-Type": "binary/octet-stream", - }, nil) + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) if err != nil { logError(testName, function, args, startTime, "", "PutObject call failed", err) } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) } destBucketName := bucketName destObjectName := objectName + "-dest" - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{}) + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) } @@ -7212,32 +8460,35 @@ func testUnencryptedToUnencryptedCopyPart() { for k, v := range header { metadata[k] = v[0] 
} + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) if err != nil { logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) } // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", 
"StatObject call failed", err) } @@ -7249,12 +8500,12 @@ func testUnencryptedToUnencryptedCopyPart() { // Now we read the data back getOpts := minio.GetObjectOptions{} getOpts.SetRange(0, 5*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) if err != nil { logError(testName, function, args, startTime, "", "GetObject call failed", err) } getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { logError(testName, function, args, startTime, "", "Read buffer failed", err) } @@ -7263,12 +8514,12 @@ func testUnencryptedToUnencryptedCopyPart() { } getOpts.SetRange(5*1024*1024, 0) - r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) if err != nil { logError(testName, function, args, startTime, "", "GetObject call failed", err) } getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { logError(testName, function, args, startTime, "", "Read buffer failed", err) } @@ -7293,14 +8544,13 @@ func testUnencryptedToSSES3CopyObjectPart() { args := map[string]interface{}{} // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } @@ -7311,13 +8561,13 @@ func 
testUnencryptedToSSES3CopyObjectPart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) } @@ -7327,23 +8577,29 @@ func testUnencryptedToSSES3CopyObjectPart() { // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, nil) + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) if err != nil { logError(testName, function, args, startTime, "", "PutObject call failed", err) } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) } destBucketName := bucketName destObjectName := objectName + "-dest" dstencryption := encrypt.NewSSE() - uploadID, err := c.NewMultipartUpload(destBucketName, 
destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) if err != nil { logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) } @@ -7358,32 +8614,35 @@ func testUnencryptedToSSES3CopyObjectPart() { for k, v := range header { metadata[k] = v[0] } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, 
lstPart}) if err != nil { logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) } // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "StatObject call failed", err) } @@ -7395,12 +8654,12 @@ func testUnencryptedToSSES3CopyObjectPart() { // Now we read the data back getOpts := minio.GetObjectOptions{} getOpts.SetRange(0, 5*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) if err != nil { logError(testName, function, args, startTime, "", "GetObject call failed", err) } getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { logError(testName, function, args, startTime, "", "Read buffer failed", err) } @@ -7409,12 +8668,12 @@ func testUnencryptedToSSES3CopyObjectPart() { } getOpts.SetRange(5*1024*1024, 0) - r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) if err != nil { logError(testName, function, args, startTime, "", "GetObject call failed", err) } getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { logError(testName, function, args, startTime, "", "Read buffer failed", err) } @@ -7439,14 +8698,13 @@ func testSSES3EncryptedToSSECCopyObjectPart() { args := map[string]interface{}{} // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + client, err := 
minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } @@ -7457,13 +8715,13 @@ func testSSES3EncryptedToSSECCopyObjectPart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) } @@ -7475,22 +8733,31 @@ func testSSES3EncryptedToSSECCopyObjectPart() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") password := "correct horse battery staple" srcEncryption := encrypt.NewSSE() - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, srcEncryption) + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + ServerSideEncryption: srcEncryption, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) if err != nil { logError(testName, function, args, startTime, "", "PutObject call failed", err) } - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) 
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) } destBucketName := bucketName destObjectName := objectName + "-dest" dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) if err != nil { logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) } @@ -7504,32 +8771,35 @@ func testSSES3EncryptedToSSECCopyObjectPart() { for k, v := range header { metadata[k] = v[0] } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Last of three parts - lstPart, err := 
c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) if err != nil { logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) } // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: dstencryption}}) + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) if err != nil { logError(testName, function, args, startTime, "", "StatObject call failed", err) } @@ -7541,12 +8811,12 @@ func testSSES3EncryptedToSSECCopyObjectPart() { // Now we read the data back getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} getOpts.SetRange(0, 5*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) if err != nil { logError(testName, function, args, startTime, "", "GetObject call failed", err) } getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { logError(testName, function, args, startTime, "", "Read buffer failed", err) } @@ -7555,12 +8825,12 @@ func testSSES3EncryptedToSSECCopyObjectPart() { } getOpts.SetRange(5*1024*1024, 0) - r, _, err = 
c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) if err != nil { logError(testName, function, args, startTime, "", "GetObject call failed", err) } getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { logError(testName, function, args, startTime, "", "Read buffer failed", err) } @@ -7585,14 +8855,13 @@ func testSSES3EncryptedToUnencryptedCopyPart() { args := map[string]interface{}{} // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } @@ -7603,13 +8872,13 @@ func testSSES3EncryptedToUnencryptedCopyPart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) } @@ -7620,22 +8889,29 @@ func testSSES3EncryptedToUnencryptedCopyPart() { // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") srcEncryption := encrypt.NewSSE() - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, srcEncryption) + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + ServerSideEncryption: srcEncryption, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) if err != nil { logError(testName, function, args, startTime, "", "PutObject call failed", err) } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) } destBucketName := bucketName destObjectName := objectName + "-dest" - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{}) + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "NewMultipartUpload 
call failed", err) } @@ -7648,32 +8924,35 @@ func testSSES3EncryptedToUnencryptedCopyPart() { for k, v := range header { metadata[k] = v[0] } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) if err != nil { logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) } // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) + objInfo, err := 
c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "StatObject call failed", err) } @@ -7685,12 +8964,12 @@ func testSSES3EncryptedToUnencryptedCopyPart() { // Now we read the data back getOpts := minio.GetObjectOptions{} getOpts.SetRange(0, 5*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) if err != nil { logError(testName, function, args, startTime, "", "GetObject call failed", err) } getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { logError(testName, function, args, startTime, "", "Read buffer failed", err) } @@ -7699,12 +8978,12 @@ func testSSES3EncryptedToUnencryptedCopyPart() { } getOpts.SetRange(5*1024*1024, 0) - r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) if err != nil { logError(testName, function, args, startTime, "", "GetObject call failed", err) } getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { logError(testName, function, args, startTime, "", "Read buffer failed", err) } @@ -7729,14 +9008,13 @@ func testSSES3EncryptedToSSES3CopyObjectPart() { args := map[string]interface{}{} // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", 
err) + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } @@ -7747,13 +9025,13 @@ func testSSES3EncryptedToSSES3CopyObjectPart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) } @@ -7764,23 +9042,30 @@ func testSSES3EncryptedToSSES3CopyObjectPart() { // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") srcEncryption := encrypt.NewSSE() + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + ServerSideEncryption: srcEncryption, + } - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, srcEncryption) + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) if err != nil { logError(testName, function, args, startTime, "", "PutObject call failed", err) } - - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, 
"", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) } destBucketName := bucketName destObjectName := objectName + "-dest" dstencryption := encrypt.NewSSE() - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) if err != nil { logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) } @@ -7795,32 +9080,35 @@ func testSSES3EncryptedToSSES3CopyObjectPart() { for k, v := range header { metadata[k] = v[0] } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) if err != nil { logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) } // Complete the multipart upload - _, err = 
c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) if err != nil { logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) } // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "StatObject call failed", err) } @@ -7832,12 +9120,12 @@ func testSSES3EncryptedToSSES3CopyObjectPart() { // Now we read the data back getOpts := minio.GetObjectOptions{} getOpts.SetRange(0, 5*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) if err != nil { logError(testName, function, args, startTime, "", "GetObject call failed", err) } getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { logError(testName, function, args, startTime, "", "Read buffer failed", err) } @@ -7846,12 +9134,12 @@ func testSSES3EncryptedToSSES3CopyObjectPart() { } getOpts.SetRange(5*1024*1024, 0) - r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) if err != nil { logError(testName, function, args, startTime, "", "GetObject call failed", err) } getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) + _, err = readFull(r, getBuf) if err != nil { logError(testName, function, args, startTime, "", "Read buffer failed", err) } @@ -7874,14 +9162,13 @@ func testUserMetadataCopying() { 
args := map[string]interface{}{} // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -7899,14 +9186,14 @@ func testUserMetadataCopyingWrapper(c *minio.Client) { // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") // Make a new bucket in 'us-east-1' (source bucket). - err := c.MakeBucket(bucketName, "us-east-1") + err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } fetchMeta := func(object string) (h http.Header) { - objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{}) + objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "Stat failed", err) return @@ -7914,9 +9201,7 @@ func testUserMetadataCopyingWrapper(c *minio.Client) { h = make(http.Header) for k, vs := range objInfo.Metadata { if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { - for _, v := range vs { - h.Add(k, v) - } + h.Add(k, vs[0]) } } return h @@ -7929,7 +9214,7 @@ func testUserMetadataCopyingWrapper(c *minio.Client) { metadata.Set("x-amz-meta-myheader", "myvalue") m := make(map[string]string) m["x-amz-meta-myheader"] = "myvalue" - _, err = c.PutObject(bucketName, "srcObject", + _, err = 
c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m}) if err != nil { logError(testName, function, args, startTime, "", "PutObjectWithMetadata failed", err) @@ -7941,19 +9226,24 @@ func testUserMetadataCopyingWrapper(c *minio.Client) { } // 2. create source - src := minio.NewSourceInfo(bucketName, "srcObject", nil) + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObject", + } + // 2.1 create destination with metadata set - dst1, err := minio.NewDestinationInfo(bucketName, "dstObject-1", nil, map[string]string{"notmyheader": "notmyvalue"}) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return + dst1 := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject-1", + UserMetadata: map[string]string{"notmyheader": "notmyvalue"}, + ReplaceMetadata: true, } // 3. Check that copying to an object with metadata set resets // the headers on the copy. args["source"] = src args["destination"] = dst1 - err = c.CopyObject(dst1, src) + _, err = c.CopyObject(context.Background(), dst1, src) if err != nil { logError(testName, function, args, startTime, "", "CopyObject failed", err) return @@ -7967,18 +9257,16 @@ func testUserMetadataCopyingWrapper(c *minio.Client) { } // 4. create destination with no metadata set and same source - dst2, err := minio.NewDestinationInfo(bucketName, "dstObject-2", nil, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return + dst2 := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject-2", } - src = minio.NewSourceInfo(bucketName, "srcObject", nil) // 5. Check that copying to an object with no metadata set, // copies metadata. 
args["source"] = src args["destination"] = dst2 - err = c.CopyObject(dst2, src) + _, err = c.CopyObject(context.Background(), dst2, src) if err != nil { logError(testName, function, args, startTime, "", "CopyObject failed", err) return @@ -7991,20 +9279,16 @@ func testUserMetadataCopyingWrapper(c *minio.Client) { } // 6. Compose a pair of sources. - srcs := []minio.SourceInfo{ - minio.NewSourceInfo(bucketName, "srcObject", nil), - minio.NewSourceInfo(bucketName, "srcObject", nil), - } - dst3, err := minio.NewDestinationInfo(bucketName, "dstObject-3", nil, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return + dst3 := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject-3", + ReplaceMetadata: true, } function = "ComposeObject(destination, sources)" - args["source"] = srcs + args["source"] = []minio.CopySrcOptions{src, src} args["destination"] = dst3 - err = c.ComposeObject(dst3, srcs) + _, err = c.ComposeObject(context.Background(), dst3, src, src) if err != nil { logError(testName, function, args, startTime, "", "ComposeObject failed", err) return @@ -8017,20 +9301,17 @@ func testUserMetadataCopyingWrapper(c *minio.Client) { } // 7. Compose a pair of sources with dest user metadata set. 
- srcs = []minio.SourceInfo{ - minio.NewSourceInfo(bucketName, "srcObject", nil), - minio.NewSourceInfo(bucketName, "srcObject", nil), - } - dst4, err := minio.NewDestinationInfo(bucketName, "dstObject-4", nil, map[string]string{"notmyheader": "notmyvalue"}) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return + dst4 := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject-4", + UserMetadata: map[string]string{"notmyheader": "notmyvalue"}, + ReplaceMetadata: true, } function = "ComposeObject(destination, sources)" - args["source"] = srcs + args["source"] = []minio.CopySrcOptions{src, src} args["destination"] = dst4 - err = c.ComposeObject(dst4, srcs) + _, err = c.ComposeObject(context.Background(), dst4, src, src) if err != nil { logError(testName, function, args, startTime, "", "ComposeObject failed", err) return @@ -8061,14 +9342,13 @@ func testUserMetadataCopyingV2() { args := map[string]interface{}{} // Instantiate new minio client object - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } @@ -8084,28 +9364,27 @@ func testStorageClassMetadataPutObject() { testName := getFuncName() // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: 
credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } fetchMeta := func(object string) (h http.Header) { - objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{}) + objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "Stat failed", err) return @@ -8129,7 +9408,7 @@ func testStorageClassMetadataPutObject() { const srcSize = 1024 * 1024 buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 1MiB - _, err = c.PutObject(bucketName, "srcObjectRRSClass", + _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) @@ -8148,7 +9427,7 @@ func testStorageClassMetadataPutObject() { metadata = make(http.Header) metadata.Set("x-amz-storage-class", "STANDARD") - _, err = c.PutObject(bucketName, "srcObjectSSClass", + _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) if err != nil { logError(testName, function, args, 
startTime, "", "PutObject failed", err) @@ -8174,21 +9453,20 @@ func testStorageClassInvalidMetadataPutObject() { testName := getFuncName() // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -8197,7 +9475,7 @@ func testStorageClassInvalidMetadataPutObject() { const srcSize = 1024 * 1024 buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 1MiB - _, err = c.PutObject(bucketName, "srcObjectRRSClass", + _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "INVALID_STORAGE_CLASS"}) if err == nil { logError(testName, function, args, startTime, "", "PutObject with invalid storage class passed, was expected to fail", err) @@ -8219,28 +9497,29 @@ func testStorageClassMetadataCopyObject() { testName := getFuncName() // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - 
) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } fetchMeta := func(object string) (h http.Header) { - objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{}) + objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{}) + args["bucket"] = bucketName + args["object"] = object if err != nil { logError(testName, function, args, startTime, "", "Stat failed", err) return @@ -8265,7 +9544,7 @@ func testStorageClassMetadataCopyObject() { buf := bytes.Repeat([]byte("abcde"), srcSize) // Put an object with RRS Storage class - _, err = c.PutObject(bucketName, "srcObjectRRSClass", + _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) @@ -8273,9 +9552,17 @@ func testStorageClassMetadataCopyObject() { } // Make server side copy of object uploaded in previous step - src := minio.NewSourceInfo(bucketName, "srcObjectRRSClass", nil) - dst, err := minio.NewDestinationInfo(bucketName, "srcObjectRRSClassCopy", nil, nil) - 
c.CopyObject(dst, src) + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObjectRRSClass", + } + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "srcObjectRRSClassCopy", + } + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed on RRS", err) + } // Get the returned metadata returnedMeta := fetchMeta("srcObjectRRSClassCopy") @@ -8290,7 +9577,7 @@ func testStorageClassMetadataCopyObject() { metadata.Set("x-amz-storage-class", "STANDARD") // Put an object with Standard Storage class - _, err = c.PutObject(bucketName, "srcObjectSSClass", + _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) @@ -8298,10 +9585,17 @@ func testStorageClassMetadataCopyObject() { } // Make server side copy of object uploaded in previous step - src = minio.NewSourceInfo(bucketName, "srcObjectSSClass", nil) - dst, err = minio.NewDestinationInfo(bucketName, "srcObjectSSClassCopy", nil, nil) - c.CopyObject(dst, src) - + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObjectSSClass", + } + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "srcObjectSSClassCopy", + } + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed on SS", err) + } // Fetch the meta data of copied object if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClassCopy")) { logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err) @@ -8332,14 +9626,13 @@ func testPutObjectNoLengthV2() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. 
- c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } @@ -8347,14 +9640,14 @@ func testPutObjectNoLengthV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -8363,20 +9656,26 @@ func testPutObjectNoLengthV2() { objectName := bucketName + "unique" args["objectName"] = objectName - bufSize := dataFileMap["datafile-65-MB"] - var reader = getDataReader("datafile-65-MB") + bufSize := dataFileMap["datafile-129-MB"] + var reader = getDataReader("datafile-129-MB") defer reader.Close() args["size"] = bufSize // Upload an object. 
- n, err := c.PutObject(bucketName, objectName, reader, -1, minio.PutObjectOptions{}) - + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, -1, minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err) return } - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Expected upload object size "+string(bufSize)+" got "+string(n), err) + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Expected upload object size "+string(bufSize)+" got "+string(st.Size), err) return } @@ -8406,14 +9705,13 @@ func testPutObjectsUnknownV2() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } @@ -8421,14 +9719,14 @@ func testPutObjectsUnknownV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -8451,14 +9749,25 @@ func testPutObjectsUnknownV2() { objectName := fmt.Sprintf("%sunique%d", bucketName, i) args["objectName"] = objectName - n, err := c.PutObject(bucketName, objectName, rpipe, -1, minio.PutObjectOptions{}) + ui, err := c.PutObject(context.Background(), bucketName, objectName, rpipe, -1, minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) return } - args["size"] = n - if n != int64(4) { - logError(testName, function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(n), err) + + if ui.Size != 4 { + logError(testName, function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(ui.Size), nil) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObjectStreaming failed", err) + return + } + + if st.Size != int64(4) { + logError(testName, function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(st.Size), err) return } @@ -8490,14 +9799,13 @@ func testPutObject0ByteV2() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. 
- c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } @@ -8505,14 +9813,14 @@ func testPutObject0ByteV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -8523,14 +9831,18 @@ func testPutObject0ByteV2() { args["opts"] = minio.PutObjectOptions{} // Upload an object. 
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{}) - + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err) return } - if n != 0 { - logError(testName, function, args, startTime, "", "Expected upload object size 0 but got "+string(n), err) + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObjectWithSize failed", err) + return + } + if st.Size != 0 { + logError(testName, function, args, startTime, "", "Expected upload object size 0 but got "+string(st.Size), err) return } @@ -8552,14 +9864,13 @@ func testComposeObjectErrorCases() { args := map[string]interface{}{} // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -8575,14 +9886,13 @@ func testCompose10KSources() { args := map[string]interface{}{} // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + 
}) if err != nil { - logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -8601,14 +9911,13 @@ func testFunctionalV2() { // Seed random based on current time. rand.Seed(time.Now().Unix()) - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } @@ -8616,7 +9925,7 @@ func testFunctionalV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -8628,7 +9937,7 @@ func testFunctionalV2() { "bucketName": bucketName, "location": location, } - err = c.MakeBucket(bucketName, location) + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -8658,7 +9967,7 @@ func testFunctionalV2() { args = map[string]interface{}{ "bucketName": bucketName, } - exists, err = c.BucketExists(bucketName) + exists, err = c.BucketExists(context.Background(), bucketName) if err != nil { logError(testName, function, args, startTime, "", "BucketExists failed", err) return @@ -8678,7 +9987,7 @@ func testFunctionalV2() { "bucketName": bucketName, "bucketPolicy": readWritePolicy, } - err = c.SetBucketPolicy(bucketName, readWritePolicy) + err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy) if err != nil { logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) @@ -8689,7 +9998,7 @@ func testFunctionalV2() { function = "ListBuckets()" functionAll += ", " + function args = nil - buckets, err := c.ListBuckets() + buckets, err := c.ListBuckets(context.Background()) if len(buckets) == 0 { logError(testName, function, args, startTime, "", "List buckets cannot be empty", err) return @@ -8723,26 +10032,36 @@ func testFunctionalV2() { "objectName": objectName, "contentType": "", } - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - if n != int64(len(buf)) { - logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), 
err) + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(st.Size), err) return } objectNameNoLength := objectName + "-nolength" args["objectName"] = objectNameNoLength - n, err = c.PutObject(bucketName, objectNameNoLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(context.Background(), bucketName, objectNameNoLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - - if n != int64(len(buf)) { - logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err) + st, err = c.StatObject(context.Background(), bucketName, objectNameNoLength, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(st.Size), err) return } @@ -8759,7 +10078,7 @@ func testFunctionalV2() { "objectName": objectName, "isRecursive": isRecursive, } - for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) { + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: isRecursive}) { if obj.Key == objectName { objFound = true break @@ -8778,7 +10097,7 @@ func testFunctionalV2() { "objectName": objectName, "isRecursive": isRecursive, } - for objIncompl := range 
c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) { + for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) { if objIncompl.Key != "" { incompObjNotFound = false break @@ -8795,7 +10114,7 @@ func testFunctionalV2() { "bucketName": bucketName, "objectName": objectName, } - newReader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -8820,7 +10139,7 @@ func testFunctionalV2() { "objectName": objectName, "fileName": fileName + "-f", } - err = c.FGetObject(bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "FgetObject failed", err) return @@ -8834,13 +10153,34 @@ func testFunctionalV2() { "objectName": objectName, "expires": 3600 * time.Second, } - presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil) + presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) if err != nil { logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) return } + + transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) + if err != nil { + logError(testName, function, args, startTime, "", "DefaultTransport failed", err) + return + } + + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively canceled after 30secs + // with no response. 
+ Timeout: 30 * time.Second, + Transport: transport, + } + + req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err) + return + } + // Verify if presigned url works. - resp, err := http.Head(presignedHeadURL.String()) + resp, err := httpClient.Do(req) if err != nil { logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err) return @@ -8863,17 +10203,25 @@ func testFunctionalV2() { "objectName": objectName, "expires": 3600 * time.Second, } - presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil) + presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) if err != nil { logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) return } + // Verify if presigned url works. - resp, err = http.Get(presignedGetURL.String()) + req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) + return + } + + resp, err = httpClient.Do(req) if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject URL GET request failed", err) + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) return } + if resp.StatusCode != http.StatusOK { logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) return @@ -8894,17 +10242,25 @@ func testFunctionalV2() { reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"") // Generate presigned GET object url. 
args["reqParams"] = reqParams - presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams) + presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams) if err != nil { logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) return } + // Verify if presigned url works. - resp, err = http.Get(presignedGetURL.String()) + req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) + return + } + + resp, err = httpClient.Do(req) if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject URL GET request failed", err) + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) return } + if resp.StatusCode != http.StatusOK { logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) return @@ -8931,7 +10287,7 @@ func testFunctionalV2() { "objectName": objectName + "-presigned", "expires": 3600 * time.Second, } - presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second) + presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second) if err != nil { logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) return @@ -8940,18 +10296,12 @@ func testFunctionalV2() { // Generate data more than 32K buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) - req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf)) + req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf)) if err != nil { logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) return } - httpClient := 
&http.Client{ - // Setting a sensible time out of 30secs to wait for response - // headers. Request is pro-actively cancelled after 30secs - // with no response. - Timeout: 30 * time.Second, - Transport: http.DefaultTransport, - } + resp, err = httpClient.Do(req) if err != nil { logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) @@ -8964,7 +10314,7 @@ func testFunctionalV2() { "bucketName": bucketName, "objectName": objectName + "-presigned", } - newReader, err = c.GetObject(bucketName, objectName+"-presigned", minio.GetObjectOptions{}) + newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -8988,23 +10338,17 @@ func testFunctionalV2() { return } - if err = os.Remove(fileName); err != nil { - logError(testName, function, args, startTime, "", "File remove failed", err) - return - } - if err = os.Remove(fileName + "-f"); err != nil { - logError(testName, function, args, startTime, "", "File removes failed", err) - return - } + os.Remove(fileName) + os.Remove(fileName + "-f") successLogger(testName, functionAll, args, startTime).Info() } -// Test get object with GetObjectWithContext -func testGetObjectWithContext() { +// Test get object with GetObject with context +func testGetObjectContext() { // initialize logging params startTime := time.Now() testName := getFuncName() - function := "GetObjectWithContext(ctx, bucketName, objectName)" + function := "GetObject(ctx, bucketName, objectName)" args := map[string]interface{}{ "ctx": "", "bucketName": "", @@ -9014,14 +10358,13 @@ func testGetObjectWithContext() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. 
- c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client v4 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } @@ -9029,14 +10372,14 @@ func testGetObjectWithContext() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -9049,7 +10392,7 @@ func testGetObjectWithContext() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return @@ -9059,14 +10402,14 @@ func testGetObjectWithContext() { args["ctx"] = ctx defer cancel() - r, err := c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{}) + r, err := c.GetObject(ctx, bucketName, objectName, 
minio.GetObjectOptions{}) if err != nil { - logError(testName, function, args, startTime, "", "GetObjectWithContext failed unexpectedly", err) + logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err) return } if _, err = r.Stat(); err == nil { - logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err) + logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err) return } r.Close() @@ -9076,9 +10419,9 @@ func testGetObjectWithContext() { defer cancel() // Read the data back - r, err = c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{}) + r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - logError(testName, function, args, startTime, "", "GetObjectWithContext failed", err) + logError(testName, function, args, startTime, "", "GetObject failed", err) return } @@ -9106,12 +10449,12 @@ func testGetObjectWithContext() { } -// Test get object with FGetObjectWithContext -func testFGetObjectWithContext() { +// Test get object with FGetObject with a user provided context +func testFGetObjectContext() { // initialize logging params startTime := time.Now() testName := getFuncName() - function := "FGetObjectWithContext(ctx, bucketName, objectName, fileName)" + function := "FGetObject(ctx, bucketName, objectName, fileName)" args := map[string]interface{}{ "ctx": "", "bucketName": "", @@ -9122,14 +10465,13 @@ func testFGetObjectWithContext() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. 
- c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client v4 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } @@ -9137,14 +10479,14 @@ func testFGetObjectWithContext() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -9157,7 +10499,7 @@ func testFGetObjectWithContext() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return @@ -9170,18 +10512,18 @@ func testFGetObjectWithContext() { fileName := "tempfile-context" args["fileName"] = fileName // Read the data back - err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + err = 
c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) if err == nil { - logError(testName, function, args, startTime, "", "FGetObjectWithContext should fail on short timeout", err) + logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err) return } ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) defer cancel() // Read the data back - err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{}) + err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{}) if err != nil { - logError(testName, function, args, startTime, "", "FGetObjectWithContext with long timeout failed", err) + logError(testName, function, args, startTime, "", "FGetObject with long timeout failed", err) return } if err = os.Remove(fileName + "-fcontext"); err != nil { @@ -9198,13 +10540,14 @@ func testFGetObjectWithContext() { } -// Test get object ACLs with GetObjectACL -func testGetObjectACL() { +// Test get object ACLs with GetObjectACL with custom provided context +func testGetObjectACLContext() { // initialize logging params startTime := time.Now() testName := getFuncName() - function := "GetObjectACL(bucketName, objectName)" + function := "GetObjectACL(ctx, bucketName, objectName)" args := map[string]interface{}{ + "ctx": "", "bucketName": "", "objectName": "", } @@ -9218,14 +10561,13 @@ func testGetObjectACL() { } // Instantiate new minio client object. 
- c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client v4 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } @@ -9233,14 +10575,14 @@ func testGetObjectACL() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -9258,14 +10600,18 @@ func testGetObjectACL() { "X-Amz-Acl": "public-read-write", } - _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData}) + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + args["ctx"] = ctx + defer cancel() + // Read the data back - objectInfo, getObjectACLErr := c.GetObjectACL(bucketName, objectName) + objectInfo, getObjectACLErr := c.GetObjectACL(ctx, bucketName, objectName) if 
getObjectACLErr == nil { logError(testName, function, args, startTime, "", "GetObjectACL fail", getObjectACLErr) return @@ -9300,14 +10646,18 @@ func testGetObjectACL() { "X-Amz-Grant-Write": "id=foowrite@minio.go", } - _, err = c.PutObject(bucketName, objectName, reader2, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData}) + _, err = c.PutObject(context.Background(), bucketName, objectName, reader2, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } + ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second) + args["ctx"] = ctx + defer cancel() + // Read the data back - objectInfo, getObjectACLErr = c.GetObjectACL(bucketName, objectName) + objectInfo, getObjectACLErr = c.GetObjectACL(ctx, bucketName, objectName) if getObjectACLErr == nil { logError(testName, function, args, startTime, "", "GetObjectACL fail", getObjectACLErr) return @@ -9360,11 +10710,11 @@ func testGetObjectACL() { } // Test validates putObject with context to see if request cancellation is honored for V2. -func testPutObjectWithContextV2() { +func testPutObjectContextV2() { // initialize logging params startTime := time.Now() testName := getFuncName() - function := "PutObjectWithContext(ctx, bucketName, objectName, reader, size, opts)" + function := "PutObject(ctx, bucketName, objectName, reader, size, opts)" args := map[string]interface{}{ "ctx": "", "bucketName": "", @@ -9373,14 +10723,13 @@ func testPutObjectWithContextV2() { "opts": "", } // Instantiate new minio client object. 
- c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } @@ -9388,18 +10737,18 @@ func testPutObjectWithContextV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Make a new bucket. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } - defer c.RemoveBucket(bucketName) + defer c.RemoveBucket(context.Background(), bucketName) bufSize := dataFileMap["datatfile-33-kB"] var reader = getDataReader("datafile-33-kB") defer reader.Close() @@ -9412,9 +10761,9 @@ func testPutObjectWithContextV2() { args["size"] = bufSize defer cancel() - _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { - logError(testName, function, args, startTime, "", "PutObjectWithContext with short timeout failed", err) + logError(testName, function, args, startTime, "", "PutObject with short timeout failed", err) return } @@ -9424,9 +10773,9 @@ 
func testPutObjectWithContextV2() { defer cancel() reader = getDataReader("datafile-33-kB") defer reader.Close() - _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { - logError(testName, function, args, startTime, "", "PutObjectWithContext with long timeout failed", err) + logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err) return } @@ -9440,12 +10789,12 @@ func testPutObjectWithContextV2() { } -// Test get object with GetObjectWithContext -func testGetObjectWithContextV2() { +// Test get object with GetObject with custom context +func testGetObjectContextV2() { // initialize logging params startTime := time.Now() testName := getFuncName() - function := "GetObjectWithContext(ctx, bucketName, objectName)" + function := "GetObject(ctx, bucketName, objectName)" args := map[string]interface{}{ "ctx": "", "bucketName": "", @@ -9455,14 +10804,13 @@ func testGetObjectWithContextV2() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } @@ -9470,14 +10818,14 @@ func testGetObjectWithContextV2() { // c.TraceOn(os.Stderr) // Set user agent. 
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -9490,7 +10838,7 @@ func testGetObjectWithContextV2() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject call failed", err) return @@ -9500,13 +10848,13 @@ func testGetObjectWithContextV2() { args["ctx"] = ctx defer cancel() - r, err := c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{}) + r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - logError(testName, function, args, startTime, "", "GetObjectWithContext failed unexpectedly", err) + logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err) return } if _, err = r.Stat(); err == nil { - logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err) + logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err) return } r.Close() @@ -9515,9 +10863,9 @@ func testGetObjectWithContextV2() { defer cancel() // Read the data back - r, err = c.GetObjectWithContext(ctx, bucketName, objectName, 
minio.GetObjectOptions{}) + r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - logError(testName, function, args, startTime, "", "GetObjectWithContext shouldn't fail on longer timeout", err) + logError(testName, function, args, startTime, "", "GetObject shouldn't fail on longer timeout", err) return } @@ -9545,12 +10893,12 @@ func testGetObjectWithContextV2() { } -// Test get object with FGetObjectWithContext -func testFGetObjectWithContextV2() { +// Test get object with FGetObject with custom context +func testFGetObjectContextV2() { // initialize logging params startTime := time.Now() testName := getFuncName() - function := "FGetObjectWithContext(ctx, bucketName, objectName,fileName)" + function := "FGetObject(ctx, bucketName, objectName,fileName)" args := map[string]interface{}{ "ctx": "", "bucketName": "", @@ -9561,14 +10909,13 @@ func testFGetObjectWithContextV2() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } @@ -9576,14 +10923,14 @@ func testFGetObjectWithContextV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket call failed", err) return @@ -9596,7 +10943,7 @@ func testFGetObjectWithContextV2() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject call failed", err) return @@ -9610,18 +10957,18 @@ func testFGetObjectWithContextV2() { args["fileName"] = fileName // Read the data back - err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) if err == nil { - logError(testName, function, args, startTime, "", "FGetObjectWithContext should fail on short timeout", err) + logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err) return } ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) defer cancel() // Read the data back - err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{}) + err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{}) if err != nil { - logError(testName, function, args, startTime, "", "FGetObjectWithContext call shouldn't fail on long timeout", err) + logError(testName, function, args, startTime, "", "FGetObject call shouldn't fail on long timeout", err) return } @@ -9639,7 +10986,7 @@ func testFGetObjectWithContextV2() { } -// Test list object v1 and V2 
storage class fields +// Test list object v1 and V2 func testListObjects() { // initialize logging params startTime := time.Now() @@ -9654,14 +11001,13 @@ func testListObjects() { rand.Seed(time.Now().Unix()) // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "Minio client v4 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } @@ -9669,88 +11015,198 @@ func testListObjects() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() + testObjects := []struct { + name string + storageClass string + }{ + // Special characters + {"foo bar", "STANDARD"}, + {"foo-%", "STANDARD"}, + {"random-object-1", "STANDARD"}, + {"random-object-2", "REDUCED_REDUNDANCY"}, + } + + for i, object := range testObjects { + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + _, err = c.PutObject(context.Background(), bucketName, object.name, reader, int64(bufSize), + minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: object.storageClass}) + if err != nil { + logError(testName, function, args, startTime, "", fmt.Sprintf("PutObject %d call failed", i+1), err) + return + } + } - // Save the data - objectName1 := randString(60, rand.NewSource(time.Now().UnixNano()), "") + testList := func(listFn func(context.Context, string, minio.ListObjectsOptions) <-chan minio.ObjectInfo, bucket string, opts minio.ListObjectsOptions) { + var objCursor int + + // check for object name and storage-class from listing object result + for objInfo := range listFn(context.Background(), bucket, opts) { + if objInfo.Err != nil { + logError(testName, function, args, startTime, "", "ListObjects failed unexpectedly", err) + return + } + if objInfo.Key != testObjects[objCursor].name { + logError(testName, function, args, startTime, "", "ListObjects does not return expected object name", err) + } + if objInfo.StorageClass != testObjects[objCursor].storageClass { + // Ignored as Gateways (Azure/GCS etc) wont return storage class + ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected 
storage class").Info() + } + objCursor++ + } + + if objCursor != len(testObjects) { + logError(testName, function, args, startTime, "", "ListObjects returned unexpected number of items", errors.New("")) + } + } + + testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, UseV1: true}) + testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true}) + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test deleting multiple objects with object retention set in Governance mode +func testRemoveObjects() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "RemoveObjects(bucketName, objectsCh, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectPrefix": "", + "recursive": "true", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) - _, err = c.PutObject(bucketName, objectName1, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: "STANDARD"}) + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "PutObject1 call failed", err) + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } - bufSize1 := dataFileMap["datafile-33-kB"] - var reader1 = getDataReader("datafile-33-kB") - defer reader1.Close() - objectName2 := randString(60, rand.NewSource(time.Now().UnixNano()), "") + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. 
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName - _, err = c.PutObject(bucketName, objectName2, reader1, int64(bufSize1), minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: "REDUCED_REDUNDANCY"}) + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) if err != nil { - logError(testName, function, args, startTime, "", "PutObject2 call failed", err) + logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } - // Create a done channel to control 'ListObjects' go routine. - doneCh := make(chan struct{}) - // Exit cleanly upon return. - defer close(doneCh) + bufSize := dataFileMap["datafile-129-MB"] + var reader = getDataReader("datafile-129-MB") + defer reader.Close() - // check for storage-class from ListObjects result - for objInfo := range c.ListObjects(bucketName, "", true, doneCh) { - if objInfo.Err != nil { - logError(testName, function, args, startTime, "", "ListObjects failed unexpectedly", err) - return - } - if objInfo.Key == objectName1 && objInfo.StorageClass != "STANDARD" { - // Ignored as Gateways (Azure/GCS etc) wont return storage class - ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected storage class").Info() - } - if objInfo.Key == objectName2 && objInfo.StorageClass != "REDUCED_REDUNDANCY" { - // Ignored as Gateways (Azure/GCS etc) wont return storage class - ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected storage class").Info() - } + n, err := c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + 
log.Fatalln(err) } + log.Println("Uploaded", objectName, " of size: ", n, "to bucket: ", bucketName, "Successfully.") - // check for storage-class from ListObjectsV2 result - for objInfo := range c.ListObjectsV2(bucketName, "", true, doneCh) { - if objInfo.Err != nil { - logError(testName, function, args, startTime, "", "ListObjectsV2 failed unexpectedly", err) - return + t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC) + m := minio.RetentionMode(minio.Governance) + opts := minio.PutObjectRetentionOptions{ + GovernanceBypass: false, + RetainUntilDate: &t, + Mode: &m, + } + err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts) + if err != nil { + log.Fatalln(err) + } + + objectsCh := make(chan minio.ObjectInfo) + // Send object names that are needed to be removed to objectsCh + go func() { + defer close(objectsCh) + // List all objects from a bucket-name with a matching prefix. + for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) { + if object.Err != nil { + log.Fatalln(object.Err) + } + objectsCh <- object } - if objInfo.Key == objectName1 && objInfo.StorageClass != "STANDARD" { - // Ignored as Gateways (Azure/GCS etc) wont return storage class - ignoredLog(testName, function, args, startTime, "ListObjectsV2 doesn't return expected storage class").Info() + }() + + for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) { + // Error is expected here because Retention is set on the object + // and RemoveObjects is called without Bypass Governance + if rErr.Err == nil { + logError(testName, function, args, startTime, "", "Expected error during deletion", nil) + return } - if objInfo.Key == objectName2 && objInfo.StorageClass != "REDUCED_REDUNDANCY" { - // Ignored as Gateways (Azure/GCS etc) wont return storage class - ignoredLog(testName, function, args, startTime, "ListObjectsV2 doesn't return expected 
storage class").Info() + } + + objectsCh1 := make(chan minio.ObjectInfo) + + // Send object names that are needed to be removed to objectsCh + go func() { + defer close(objectsCh1) + // List all objects from a bucket-name with a matching prefix. + for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) { + if object.Err != nil { + log.Fatalln(object.Err) + } + objectsCh1 <- object } + }() + + opts1 := minio.RemoveObjectsOptions{ + GovernanceBypass: true, + } + + for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh1, opts1) { + // Error is not expected here because Retention is set on the object + // and RemoveObjects is called with Bypass Governance + logError(testName, function, args, startTime, "", "Error detected during deletion", rErr.Err) + return } // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { + if err = cleanupVersionedBucket(bucketName, c); err != nil { logError(testName, function, args, startTime, "", "Cleanup failed", err) return } successLogger(testName, function, args, startTime).Info() - } // Convert string to bool and always return false if any error @@ -9773,7 +11229,6 @@ func main() { log.SetLevel(log.InfoLevel) tls := mustParseBool(os.Getenv(enableHTTPS)) - kmsEnabled := mustParseBool(os.Getenv(enableKMS)) // execute tests if isFullMode() { testMakeBucketErrorV2() @@ -9790,10 +11245,10 @@ func main() { testPutObject0ByteV2() testPutObjectNoLengthV2() testPutObjectsUnknownV2() - testGetObjectWithContextV2() - testFPutObjectWithContextV2() - testFGetObjectWithContextV2() - testPutObjectWithContextV2() + testGetObjectContextV2() + testFPutObjectContextV2() + testFGetObjectContextV2() + testPutObjectContextV2() testMakeBucketError() testMakeBucketRegions() testPutObjectWithMetadata() @@ -9806,6 +11261,7 @@ func main() { testFPutObject() testGetObjectReadSeekFunctional() testGetObjectReadAtFunctional() + 
testGetObjectReadAtWhenEOFWasReached() testPresignedPostPolicy() testCopyObject() testComposeObjectErrorCases() @@ -9815,18 +11271,25 @@ func main() { testFunctional() testGetObjectModified() testPutObjectUploadSeekedObject() - testGetObjectWithContext() - testFPutObjectWithContext() - testFGetObjectWithContext() - - testGetObjectACL() - - testPutObjectWithContext() + testGetObjectContext() + testFPutObjectContext() + testFGetObjectContext() + testGetObjectACLContext() + testPutObjectContext() testStorageClassMetadataPutObject() testStorageClassInvalidMetadataPutObject() testStorageClassMetadataCopyObject() testPutObjectWithContentLanguage() testListObjects() + testRemoveObjects() + testListObjectVersions() + testStatObjectWithVersioning() + testGetObjectWithVersioning() + testCopyObjectWithVersioning() + testComposeObjectWithVersioning() + testRemoveObjectWithVersioning() + testRemoveObjectsWithVersioning() + testObjectTaggingWithVersioning() // SSE-C tests will only work over TLS connection. 
if tls { @@ -9842,26 +11305,27 @@ func main() { testEncryptedEmptyObject() testDecryptedCopyObject() testSSECEncryptedToSSECCopyObjectPart() + testSSECMultipartEncryptedToSSECCopyObjectPart() testSSECEncryptedToUnencryptedCopyPart() testUnencryptedToSSECCopyObjectPart() testUnencryptedToUnencryptedCopyPart() - if kmsEnabled { - testSSES3EncryptionPutGet() - testSSES3EncryptionFPut() - testSSES3EncryptedGetObjectReadAtFunctional() - testSSES3EncryptedGetObjectReadSeekFunctional() - testEncryptedSSECToSSES3CopyObject() - testEncryptedSSES3ToSSECCopyObject() - testEncryptedSSES3ToSSES3CopyObject() - testEncryptedSSES3ToUnencryptedCopyObject() - testUnencryptedToSSES3CopyObject() - testSSECEncryptedToSSES3CopyObjectPart() - testUnencryptedToSSES3CopyObjectPart() - testSSES3EncryptedToSSECCopyObjectPart() - testSSES3EncryptedToUnencryptedCopyPart() - testSSES3EncryptedToSSES3CopyObjectPart() - } - } + testEncryptedSSECToSSES3CopyObject() + testEncryptedSSES3ToSSECCopyObject() + testSSECEncryptedToSSES3CopyObjectPart() + testSSES3EncryptedToSSECCopyObjectPart() + } + + // KMS tests + testSSES3EncryptionPutGet() + testSSES3EncryptionFPut() + testSSES3EncryptedGetObjectReadAtFunctional() + testSSES3EncryptedGetObjectReadSeekFunctional() + testEncryptedSSES3ToSSES3CopyObject() + testEncryptedSSES3ToUnencryptedCopyObject() + testUnencryptedToSSES3CopyObject() + testUnencryptedToSSES3CopyObjectPart() + testSSES3EncryptedToUnencryptedCopyPart() + testSSES3EncryptedToSSES3CopyObjectPart() } else { testFunctional() testFunctionalV2() diff --git a/get-options_test.go b/get-options_test.go index c5344a0c6..a447f09d0 100644 --- a/get-options_test.go +++ b/get-options_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/go.mod b/go.mod index 9c416086f..670f6ea94 100644 --- a/go.mod +++ b/go.mod @@ -1,14 +1,27 @@ -module github.com/minio/minio-go +module github.com/minio/minio-go/v7 + +go 1.12 require ( - github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect - github.com/jtolds/gls v4.2.1+incompatible // indirect + github.com/dustin/go-humanize v1.0.0 // indirect + github.com/google/uuid v1.1.1 + github.com/json-iterator/go v1.1.10 + github.com/klauspost/cpuid v1.3.1 // indirect + github.com/kr/pretty v0.1.0 // indirect + github.com/minio/md5-simd v1.1.0 + github.com/minio/sha256-simd v0.1.1 github.com/mitchellh/go-homedir v1.1.0 - github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 // indirect - github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c // indirect - golang.org/x/crypto v0.0.0-20190128193316-c7b33c32a30b - golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 - golang.org/x/sys v0.0.0-20190124100055-b90733256f2e // indirect - golang.org/x/text v0.3.0 // indirect - gopkg.in/ini.v1 v1.41.0 + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/rs/xid v1.2.1 + github.com/sirupsen/logrus v1.6.0 // indirect + github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a // indirect + github.com/stretchr/testify v1.4.0 // indirect + golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899 + golang.org/x/net v0.0.0-20200707034311-ab3426394381 + golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae // indirect + golang.org/x/text v0.3.3 // indirect + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect + gopkg.in/ini.v1 v1.57.0 + gopkg.in/yaml.v2 v2.2.8 // indirect ) diff --git a/go.sum b/go.sum index 5f9d0e6c9..1dce33dd4 100644 --- a/go.sum +++ b/go.sum @@ -1,20 +1,83 @@ 
-github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg= -github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= -github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/klauspost/cpuid v1.2.3 h1:CCtW0xUnWGVINKvE/WWOYKdsPV6mawAtvQuSl8guwQs= +github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= 
+github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= +github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= +github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= +github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 h1:Jpy1PXuP99tXNrhbq2BaPz9B+jNAvH1JPQQpG/9GCXY= -github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c h1:Ho+uVpkel/udgjbwB5Lktg9BtvJSh2DT0Hi6LPSyI2w= -github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= -golang.org/x/crypto v0.0.0-20190128193316-c7b33c32a30b h1:Ib/yptP38nXZFMwqWSip+OKuMP9OkyDe3p+DssP8n9w= -golang.org/x/crypto v0.0.0-20190128193316-c7b33c32a30b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 h1:ulvT7fqt0yHWzpJwI57MezWnYDVpCAYBVuYst/L+fAY= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sys v0.0.0-20190124100055-b90733256f2e h1:3GIlrlVLfkoipSReOMNAgApI0ajnalyLa/EZHHca/XI= -golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a 
h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899 h1:DZhuSZLsGlFL4CmhA8BcRA0mnthyA/nZ00AqCUo7vHg= +golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/ini.v1 v1.41.0 h1:Ka3ViY6gNYSKiVy71zXBEqKplnV35ImDLVG+8uoIklE= -gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= +gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/hook-reader.go b/hook-reader.go index f23aec754..f251c1e95 100644 --- a/hook-reader.go +++ b/hook-reader.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. 
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/pkg/credentials/assume_role.go b/pkg/credentials/assume_role.go new file mode 100644 index 000000000..cc88a9ab6 --- /dev/null +++ b/pkg/credentials/assume_role.go @@ -0,0 +1,214 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "encoding/hex" + "encoding/xml" + "errors" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/minio/minio-go/v7/pkg/signer" + sha256 "github.com/minio/sha256-simd" +) + +// AssumeRoleResponse contains the result of successful AssumeRole request. +type AssumeRoleResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleResponse" json:"-"` + + Result AssumeRoleResult `xml:"AssumeRoleResult"` + ResponseMetadata struct { + RequestID string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} + +// AssumeRoleResult - Contains the response to a successful AssumeRole +// request, including temporary credentials that can be used to make +// MinIO API requests. 
+type AssumeRoleResult struct { + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser AssumedRoleUser `xml:",omitempty"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + } `xml:",omitempty"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize int `xml:",omitempty"` +} + +// A STSAssumeRole retrieves credentials from MinIO service, and keeps track if +// those credentials are expired. +type STSAssumeRole struct { + Expiry + + // Required http Client to use when connecting to MinIO STS service. + Client *http.Client + + // STS endpoint to fetch STS credentials. + STSEndpoint string + + // various options for this request. + Options STSAssumeRoleOptions +} + +// STSAssumeRoleOptions collection of various input options +// to obtain AssumeRole credentials. +type STSAssumeRoleOptions struct { + // Mandatory inputs. + AccessKey string + SecretKey string + + Location string // Optional commonly needed with AWS STS. + DurationSeconds int // Optional defaults to 1 hour. 
+ + // Optional only valid if using with AWS STS + RoleARN string + RoleSessionName string +} + +// NewSTSAssumeRole returns a pointer to a new +// Credentials object wrapping the STSAssumeRole. +func NewSTSAssumeRole(stsEndpoint string, opts STSAssumeRoleOptions) (*Credentials, error) { + if stsEndpoint == "" { + return nil, errors.New("STS endpoint cannot be empty") + } + if opts.AccessKey == "" || opts.SecretKey == "" { + return nil, errors.New("AssumeRole credentials access/secretkey is mandatory") + } + return New(&STSAssumeRole{ + Client: &http.Client{ + Transport: http.DefaultTransport, + }, + STSEndpoint: stsEndpoint, + Options: opts, + }), nil +} + +const defaultDurationSeconds = 3600 + +// closeResponse close non nil response with any response Body. +// convenient wrapper to drain any remaining data on response body. +// +// Subsequently this allows golang http RoundTripper +// to re-use the same connection for future requests. +func closeResponse(resp *http.Response) { + // Callers should close resp.Body when done reading from it. + // If resp.Body is not closed, the Client's underlying RoundTripper + // (typically Transport) may not be able to re-use a persistent TCP + // connection to the server for a subsequent "keep-alive" request. + if resp != nil && resp.Body != nil { + // Drain any remaining Body and then close the connection. + // Without this closing connection would disallow re-using + // the same connection for future uses. 
+ // - http://stackoverflow.com/a/17961593/4465767 + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + } +} + +func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssumeRoleOptions) (AssumeRoleResponse, error) { + v := url.Values{} + v.Set("Action", "AssumeRole") + v.Set("Version", "2011-06-15") + if opts.RoleARN != "" { + v.Set("RoleArn", opts.RoleARN) + } + if opts.RoleSessionName != "" { + v.Set("RoleSessionName", opts.RoleSessionName) + } + if opts.DurationSeconds > defaultDurationSeconds { + v.Set("DurationSeconds", strconv.Itoa(opts.DurationSeconds)) + } else { + v.Set("DurationSeconds", strconv.Itoa(defaultDurationSeconds)) + } + + u, err := url.Parse(endpoint) + if err != nil { + return AssumeRoleResponse{}, err + } + u.Path = "/" + + postBody := strings.NewReader(v.Encode()) + hash := sha256.New() + if _, err = io.Copy(hash, postBody); err != nil { + return AssumeRoleResponse{}, err + } + postBody.Seek(0, 0) + + req, err := http.NewRequest(http.MethodPost, u.String(), postBody) + if err != nil { + return AssumeRoleResponse{}, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(hash.Sum(nil))) + req = signer.SignV4STS(*req, opts.AccessKey, opts.SecretKey, opts.Location) + + resp, err := clnt.Do(req) + if err != nil { + return AssumeRoleResponse{}, err + } + defer closeResponse(resp) + if resp.StatusCode != http.StatusOK { + return AssumeRoleResponse{}, errors.New(resp.Status) + } + + a := AssumeRoleResponse{} + if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { + return AssumeRoleResponse{}, err + } + return a, nil +} + +// Retrieve retrieves credentials from the MinIO service. +// Error will be returned if the request fails. +func (m *STSAssumeRole) Retrieve() (Value, error) { + a, err := getAssumeRoleCredentials(m.Client, m.STSEndpoint, m.Options) + if err != nil { + return Value{}, err + } + + // Expiry window is set to 10secs. 
+ m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) + + return Value{ + AccessKeyID: a.Result.Credentials.AccessKey, + SecretAccessKey: a.Result.Credentials.SecretKey, + SessionToken: a.Result.Credentials.SessionToken, + SignerType: SignatureV4, + }, nil +} diff --git a/pkg/credentials/chain.go b/pkg/credentials/chain.go index e29826f48..6dc8e9d05 100644 --- a/pkg/credentials/chain.go +++ b/pkg/credentials/chain.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/pkg/credentials/chain_test.go b/pkg/credentials/chain_test.go index d26e376ff..2e18c29f2 100644 --- a/pkg/credentials/chain_test.go +++ b/pkg/credentials/chain_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/pkg/credentials/config.json.sample b/pkg/credentials/config.json.sample index 130746f4b..d793c9e0e 100644 --- a/pkg/credentials/config.json.sample +++ b/pkg/credentials/config.json.sample @@ -2,7 +2,7 @@ "version": "8", "hosts": { "play": { - "url": "https://play.minio.io:9000", + "url": "https://play.min.io", "accessKey": "Q3AM3UQ867SPQQA43P2F", "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", "api": "S3v2" diff --git a/pkg/credentials/credentials.go b/pkg/credentials/credentials.go index 4bfdad413..1a48751b5 100644 --- a/pkg/credentials/credentials.go +++ b/pkg/credentials/credentials.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/pkg/credentials/credentials_test.go b/pkg/credentials/credentials_test.go index 92c77c4cb..57ea67983 100644 --- a/pkg/credentials/credentials_test.go +++ b/pkg/credentials/credentials_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/pkg/credentials/doc.go b/pkg/credentials/doc.go index c48784ba8..0c94477b7 100644 --- a/pkg/credentials/doc.go +++ b/pkg/credentials/doc.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/pkg/credentials/env_aws.go b/pkg/credentials/env_aws.go index f9b2cc33a..b6e60d0e1 100644 --- a/pkg/credentials/env_aws.go +++ b/pkg/credentials/env_aws.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/pkg/credentials/env_minio.go b/pkg/credentials/env_minio.go index d72e77185..5f1ae0d25 100644 --- a/pkg/credentials/env_minio.go +++ b/pkg/credentials/env_minio.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/pkg/credentials/env_test.go b/pkg/credentials/env_test.go index 09cd77f7a..2f982ae90 100644 --- a/pkg/credentials/env_test.go +++ b/pkg/credentials/env_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/pkg/credentials/file_aws_credentials.go b/pkg/credentials/file_aws_credentials.go index d61f197db..ccc8251f4 100644 --- a/pkg/credentials/file_aws_credentials.go +++ b/pkg/credentials/file_aws_credentials.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -36,12 +36,12 @@ type FileAWSCredentials struct { // env value is empty will default to current user's home directory. // Linux/OSX: "$HOME/.aws/credentials" // Windows: "%USERPROFILE%\.aws\credentials" - filename string + Filename string // AWS Profile to extract credentials from the shared credentials file. If empty // will default to environment variable "AWS_PROFILE" or "default" if // environment variable is also not set. - profile string + Profile string // retrieved states if the credentials have been successfully retrieved. retrieved bool @@ -51,34 +51,34 @@ type FileAWSCredentials struct { // wrapping the Profile file provider. func NewFileAWSCredentials(filename string, profile string) *Credentials { return New(&FileAWSCredentials{ - filename: filename, - profile: profile, + Filename: filename, + Profile: profile, }) } // Retrieve reads and extracts the shared credentials from the current // users home directory. 
func (p *FileAWSCredentials) Retrieve() (Value, error) { - if p.filename == "" { - p.filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE") - if p.filename == "" { + if p.Filename == "" { + p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE") + if p.Filename == "" { homeDir, err := homedir.Dir() if err != nil { return Value{}, err } - p.filename = filepath.Join(homeDir, ".aws", "credentials") + p.Filename = filepath.Join(homeDir, ".aws", "credentials") } } - if p.profile == "" { - p.profile = os.Getenv("AWS_PROFILE") - if p.profile == "" { - p.profile = "default" + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + if p.Profile == "" { + p.Profile = "default" } } p.retrieved = false - iniProfile, err := loadProfile(p.filename, p.profile) + iniProfile, err := loadProfile(p.Filename, p.Profile) if err != nil { return Value{}, err } diff --git a/pkg/credentials/file_minio_client.go b/pkg/credentials/file_minio_client.go index 6a6827e37..ca6db005b 100644 --- a/pkg/credentials/file_minio_client.go +++ b/pkg/credentials/file_minio_client.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,12 +18,12 @@ package credentials import ( - "encoding/json" "io/ioutil" "os" "path/filepath" "runtime" + jsoniter "github.com/json-iterator/go" homedir "github.com/mitchellh/go-homedir" ) @@ -38,12 +38,12 @@ type FileMinioClient struct { // env value is empty will default to current user's home directory. // Linux/OSX: "$HOME/.mc/config.json" // Windows: "%USERALIAS%\mc\config.json" - filename string + Filename string - // Minio Alias to extract credentials from the shared credentials file. If empty + // MinIO Alias to extract credentials from the shared credentials file. 
If empty // will default to environment variable "MINIO_ALIAS" or "default" if // environment variable is also not set. - alias string + Alias string // retrieved states if the credentials have been successfully retrieved. retrieved bool @@ -53,39 +53,39 @@ type FileMinioClient struct { // wrapping the Alias file provider. func NewFileMinioClient(filename string, alias string) *Credentials { return New(&FileMinioClient{ - filename: filename, - alias: alias, + Filename: filename, + Alias: alias, }) } // Retrieve reads and extracts the shared credentials from the current // users home directory. func (p *FileMinioClient) Retrieve() (Value, error) { - if p.filename == "" { + if p.Filename == "" { if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok { - p.filename = value + p.Filename = value } else { homeDir, err := homedir.Dir() if err != nil { return Value{}, err } - p.filename = filepath.Join(homeDir, ".mc", "config.json") + p.Filename = filepath.Join(homeDir, ".mc", "config.json") if runtime.GOOS == "windows" { - p.filename = filepath.Join(homeDir, "mc", "config.json") + p.Filename = filepath.Join(homeDir, "mc", "config.json") } } } - if p.alias == "" { - p.alias = os.Getenv("MINIO_ALIAS") - if p.alias == "" { - p.alias = "s3" + if p.Alias == "" { + p.Alias = os.Getenv("MINIO_ALIAS") + if p.Alias == "" { + p.Alias = "s3" } } p.retrieved = false - hostCfg, err := loadAlias(p.filename, p.alias) + hostCfg, err := loadAlias(p.Filename, p.Alias) if err != nil { return Value{}, err } @@ -122,6 +122,8 @@ type config struct { // returned if it fails to read from the file. 
func loadAlias(filename, alias string) (hostConfig, error) { cfg := &config{} + var json = jsoniter.ConfigCompatibleWithStandardLibrary + configBytes, err := ioutil.ReadFile(filename) if err != nil { return hostConfig{}, err diff --git a/pkg/credentials/file_test.go b/pkg/credentials/file_test.go index c85c10494..598c3f52e 100644 --- a/pkg/credentials/file_test.go +++ b/pkg/credentials/file_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/pkg/credentials/iam_aws.go b/pkg/credentials/iam_aws.go index 05b2a8bb4..ceeab84dd 100644 --- a/pkg/credentials/iam_aws.go +++ b/pkg/credentials/iam_aws.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,14 +19,17 @@ package credentials import ( "bufio" - "encoding/json" "errors" "fmt" + "io/ioutil" + "net" "net/http" "net/url" "os" "path" "time" + + jsoniter "github.com/json-iterator/go" ) // DefaultExpiryWindow - Default expiry window. 
@@ -53,20 +56,10 @@ type IAM struct { const ( defaultIAMRoleEndpoint = "http://169.254.169.254" defaultECSRoleEndpoint = "http://169.254.170.2" - defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials" + defaultSTSRoleEndpoint = "https://sts.amazonaws.com" + defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/" ) -// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html -func getEndpoint(endpoint string) (string, bool) { - if endpoint != "" { - return endpoint, os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") != "" - } - if ecsURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"); ecsURI != "" { - return fmt.Sprintf("%s%s", defaultECSRoleEndpoint, ecsURI), true - } - return defaultIAMRoleEndpoint, false -} - // NewIAM returns a pointer to a new Credentials object wrapping the IAM. func NewIAM(endpoint string) *Credentials { p := &IAM{ @@ -82,14 +75,68 @@ func NewIAM(endpoint string) *Credentials { // Error will be returned if the request fails, or unable to extract // the desired func (m *IAM) Retrieve() (Value, error) { - endpoint, isEcsTask := getEndpoint(m.endpoint) var roleCreds ec2RoleCredRespBody var err error - if isEcsTask { + + endpoint := m.endpoint + switch { + case len(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")) > 0: + if len(endpoint) == 0 { + if len(os.Getenv("AWS_REGION")) > 0 { + endpoint = "https://sts." 
+ os.Getenv("AWS_REGION") + ".amazonaws.com" + } else { + endpoint = defaultSTSRoleEndpoint + } + } + + creds := &STSWebIdentity{ + Client: m.Client, + stsEndpoint: endpoint, + roleARN: os.Getenv("AWS_ROLE_ARN"), + roleSessionName: os.Getenv("AWS_ROLE_SESSION_NAME"), + getWebIDTokenExpiry: func() (*WebIdentityToken, error) { + token, err := ioutil.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")) + if err != nil { + return nil, err + } + + return &WebIdentityToken{Token: string(token)}, nil + }, + } + + stsWebIdentityCreds, err := creds.Retrieve() + if err == nil { + m.SetExpiration(creds.Expiration(), DefaultExpiryWindow) + } + return stsWebIdentityCreds, err + + case len(os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")) > 0: + if len(endpoint) == 0 { + endpoint = fmt.Sprintf("%s%s", defaultECSRoleEndpoint, + os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")) + } + roleCreds, err = getEcsTaskCredentials(m.Client, endpoint) - } else { + + case len(os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")) > 0: + if len(endpoint) == 0 { + endpoint = os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI") + + var ok bool + if ok, err = isLoopback(endpoint); !ok { + if err == nil { + err = fmt.Errorf("uri host is not a loopback address: %s", endpoint) + } + break + } + } + + roleCreds, err = getEcsTaskCredentials(m.Client, endpoint) + + default: roleCreds, err = getCredentials(m.Client, endpoint) } + if err != nil { return Value{}, err } @@ -126,6 +173,10 @@ type ec2RoleCredRespBody struct { // be sent to fetch the rolling access credentials. // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html func getIAMRoleURL(endpoint string) (*url.URL, error) { + if endpoint == "" { + endpoint = defaultIAMRoleEndpoint + } + u, err := url.Parse(endpoint) if err != nil { return nil, err @@ -139,7 +190,7 @@ func getIAMRoleURL(endpoint string) (*url.URL, error) { // or there is an error making or receiving the request. 
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html func listRoleNames(client *http.Client, u *url.URL) ([]string, error) { - req, err := http.NewRequest("GET", u.String(), nil) + req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return nil, err } @@ -166,7 +217,7 @@ func listRoleNames(client *http.Client, u *url.URL) ([]string, error) { } func getEcsTaskCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { - req, err := http.NewRequest("GET", endpoint, nil) + req, err := http.NewRequest(http.MethodGet, endpoint, nil) if err != nil { return ec2RoleCredRespBody{}, err } @@ -181,7 +232,7 @@ func getEcsTaskCredentials(client *http.Client, endpoint string) (ec2RoleCredRes } respCreds := ec2RoleCredRespBody{} - if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil { + if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil { return ec2RoleCredRespBody{}, err } @@ -222,7 +273,7 @@ func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, // $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access // u.Path = path.Join(u.Path, roleName) - req, err := http.NewRequest("GET", u.String(), nil) + req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return ec2RoleCredRespBody{}, err } @@ -237,7 +288,7 @@ func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, } respCreds := ec2RoleCredRespBody{} - if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil { + if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil { return ec2RoleCredRespBody{}, err } @@ -248,3 +299,28 @@ func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, return respCreds, nil } + +// isLoopback identifies if a uri's host is on a loopback address +func isLoopback(uri string) (bool, error) { + u, err := url.Parse(uri) + if err != nil { + return false, 
err + } + + host := u.Hostname() + if len(host) == 0 { + return false, fmt.Errorf("can't parse host from uri: %s", uri) + } + + ips, err := net.LookupHost(host) + if err != nil { + return false, err + } + for _, ip := range ips { + if !net.ParseIP(ip).IsLoopback() { + return false, nil + } + } + + return true, nil +} diff --git a/pkg/credentials/iam_aws_test.go b/pkg/credentials/iam_aws_test.go index 4dbbb0a13..a7c3d3368 100644 --- a/pkg/credentials/iam_aws_test.go +++ b/pkg/credentials/iam_aws_test.go @@ -1,6 +1,8 @@ +// +build !windows + /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,6 +21,7 @@ package credentials import ( "fmt" + "io/ioutil" "net/http" "net/http/httptest" "os" @@ -49,6 +52,27 @@ const credsRespEcsTaskTmpl = `{ "Expiration" : "%s" }` +const credsRespStsImpl = ` + + amzn1.account.AF6RHO7KZU5XRVQJGXK6HB56KR2A + client.5498841531868486423.1548@apps.example.com + + arn:aws:sts::123456789012:assumed-role/FederatedWebIdentityRole/app1 + AROACLKWSDQRAOEXAMPLE:app1 + + + token + secret + %s + accessKey + + www.amazon.com + + + ad4156e9-bce1-11e2-82e6-6b6efEXAMPLE + +` + func initTestFailServer() *httptest.Server { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.Error(w, "Not allowed", http.StatusBadRequest) @@ -65,11 +89,11 @@ func initTestServerNoRoles() *httptest.Server { func initTestServer(expireOn string, failAssume bool) *httptest.Server { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/latest/meta-data/iam/security-credentials" { + if r.URL.Path == "/latest/meta-data/iam/security-credentials/" { fmt.Fprintln(w, "RoleName") } else if r.URL.Path == 
"/latest/meta-data/iam/security-credentials/RoleName" { if failAssume { - fmt.Fprintf(w, credsFailRespTmpl) + fmt.Fprint(w, credsFailRespTmpl) } else { fmt.Fprintf(w, credsRespTmpl, expireOn) } @@ -89,15 +113,28 @@ func initEcsTaskTestServer(expireOn string) *httptest.Server { return server } +func initStsTestServer(expireOn string) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + required := []string{"RoleArn", "RoleSessionName", "WebIdentityToken", "Version"} + for _, field := range required { + if _, ok := r.URL.Query()[field]; !ok { + http.Error(w, fmt.Sprintf("%s missing", field), http.StatusBadRequest) + return + } + } + + fmt.Fprintf(w, credsRespStsImpl, expireOn) + })) + + return server +} + func TestIAMMalformedEndpoint(t *testing.T) { creds := NewIAM("%%%%") _, err := creds.Get() if err == nil { t.Fatal("Unexpected should fail here") } - if err.Error() != `parse %%%%: invalid URL escape "%%%"` { - t.Fatalf("Expected parse %%%%%%%%: invalid URL escape \"%%%%%%\", got %s", err) - } } func TestIAMFailServer(t *testing.T) { @@ -241,3 +278,74 @@ func TestEcsTask(t *testing.T) { t.Error("Expected creds to be expired.") } } + +func TestEcsTaskFullURI(t *testing.T) { + server := initEcsTaskTestServer("2014-12-16T01:51:37Z") + defer server.Close() + p := &IAM{ + Client: http.DefaultClient, + } + os.Setenv("AWS_CONTAINER_CREDENTIALS_FULL_URI", + fmt.Sprintf("%s%s", server.URL, "/v2/credentials?id=task_credential_id")) + creds, err := p.Retrieve() + os.Unsetenv("AWS_CONTAINER_CREDENTIALS_FULL_URI") + if err != nil { + t.Errorf("Unexpected failure %s", err) + } + if "accessKey" != creds.AccessKeyID { + t.Errorf("Expected \"accessKey\", got %s", creds.AccessKeyID) + } + + if "secret" != creds.SecretAccessKey { + t.Errorf("Expected \"secret\", got %s", creds.SecretAccessKey) + } + + if "token" != creds.SessionToken { + t.Errorf("Expected \"token\", got %s", creds.SessionToken) + } + + if !p.IsExpired() 
{ + t.Error("Expected creds to be expired.") + } +} + +func TestSts(t *testing.T) { + server := initStsTestServer("2014-12-16T01:51:37Z") + defer server.Close() + p := &IAM{ + Client: http.DefaultClient, + endpoint: server.URL, + } + + f, err := ioutil.TempFile("", "minio-go") + if err != nil { + t.Errorf("Unexpected failure %s", err) + } + defer os.Remove(f.Name()) + f.Write([]byte("token")) + f.Close() + + os.Setenv("AWS_WEB_IDENTITY_TOKEN_FILE", f.Name()) + os.Setenv("AWS_ROLE_ARN", "arn:aws:sts::123456789012:assumed-role/FederatedWebIdentityRole/app1") + creds, err := p.Retrieve() + os.Unsetenv("AWS_WEB_IDENTITY_TOKEN_FILE") + os.Unsetenv("AWS_ROLE_ARN") + if err != nil { + t.Errorf("Unexpected failure %s", err) + } + if "accessKey" != creds.AccessKeyID { + t.Errorf("Expected \"accessKey\", got %s", creds.AccessKeyID) + } + + if "secret" != creds.SecretAccessKey { + t.Errorf("Expected \"secret\", got %s", creds.SecretAccessKey) + } + + if "token" != creds.SessionToken { + t.Errorf("Expected \"token\", got %s", creds.SessionToken) + } + + if !p.IsExpired() { + t.Error("Expected creds to be expired.") + } +} diff --git a/pkg/credentials/signature-type.go b/pkg/credentials/signature-type.go index 1b768e8c3..b79433305 100644 --- a/pkg/credentials/signature-type.go +++ b/pkg/credentials/signature-type.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/pkg/credentials/static.go b/pkg/credentials/static.go index 8b0ba711c..7dde00b0a 100644 --- a/pkg/credentials/static.go +++ b/pkg/credentials/static.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. 
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/pkg/credentials/static_test.go b/pkg/credentials/static_test.go index f1d2d856c..65bec0565 100644 --- a/pkg/credentials/static_test.go +++ b/pkg/credentials/static_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/pkg/credentials/sts_client_grants.go b/pkg/credentials/sts_client_grants.go index 0ca307ab8..e89d5d4d8 100644 --- a/pkg/credentials/sts_client_grants.go +++ b/pkg/credentials/sts_client_grants.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -43,7 +43,7 @@ type AssumeRoleWithClientGrantsResponse struct { } // ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants -// request, including temporary credentials that can be used to make Minio API requests. +// request, including temporary credentials that can be used to make MinIO API requests. 
type ClientGrantsResult struct { AssumedRoleUser AssumedRoleUser `xml:",omitempty"` Audience string `xml:",omitempty"` @@ -64,15 +64,15 @@ type ClientGrantsToken struct { Expiry int } -// A STSClientGrants retrieves credentials from Minio service, and keeps track if +// A STSClientGrants retrieves credentials from MinIO service, and keeps track if // those credentials are expired. type STSClientGrants struct { Expiry - // Required http Client to use when connecting to Minio STS service. + // Required http Client to use when connecting to MinIO STS service. Client *http.Client - // Minio endpoint to fetch STS credentials. + // MinIO endpoint to fetch STS credentials. stsEndpoint string // getClientGrantsTokenExpiry function to retrieve tokens @@ -122,7 +122,7 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string, } u.RawQuery = v.Encode() - req, err := http.NewRequest("POST", u.String(), nil) + req, err := http.NewRequest(http.MethodPost, u.String(), nil) if err != nil { return AssumeRoleWithClientGrantsResponse{}, err } @@ -142,7 +142,7 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string, return a, nil } -// Retrieve retrieves credentials from the Minio service. +// Retrieve retrieves credentials from the MinIO service. // Error will be returned if the request fails. func (m *STSClientGrants) Retrieve() (Value, error) { a, err := getClientGrantsCredentials(m.Client, m.stsEndpoint, m.getClientGrantsTokenExpiry) diff --git a/pkg/credentials/sts_ldap_identity.go b/pkg/credentials/sts_ldap_identity.go new file mode 100644 index 000000000..abbf61641 --- /dev/null +++ b/pkg/credentials/sts_ldap_identity.go @@ -0,0 +1,119 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "encoding/xml" + "errors" + "net/http" + "net/url" + "time" +) + +// AssumeRoleWithLDAPResponse contains the result of successful +// AssumeRoleWithLDAPIdentity request +type AssumeRoleWithLDAPResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithLDAPIdentityResponse" json:"-"` + Result LDAPIdentityResult `xml:"AssumeRoleWithLDAPIdentityResult"` + ResponseMetadata struct { + RequestID string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} + +// LDAPIdentityResult - contains credentials for a successful +// AssumeRoleWithLDAPIdentity request. +type LDAPIdentityResult struct { + Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + } `xml:",omitempty"` + + SubjectFromToken string `xml:",omitempty"` +} + +// LDAPIdentity retrieves credentials from MinIO +type LDAPIdentity struct { + Expiry + + stsEndpoint string + + ldapUsername, ldapPassword string +} + +// NewLDAPIdentity returns new credentials object that uses LDAP +// Identity. 
+func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string) (*Credentials, error) { + return New(&LDAPIdentity{ + stsEndpoint: stsEndpoint, + ldapUsername: ldapUsername, + ldapPassword: ldapPassword, + }), nil +} + +// Retrieve gets the credential by calling the MinIO STS API for +// LDAP on the configured stsEndpoint. +func (k *LDAPIdentity) Retrieve() (value Value, err error) { + u, kerr := url.Parse(k.stsEndpoint) + if kerr != nil { + err = kerr + return + } + + clnt := &http.Client{Transport: http.DefaultTransport} + v := url.Values{} + v.Set("Action", "AssumeRoleWithLDAPIdentity") + v.Set("Version", "2011-06-15") + v.Set("LDAPUsername", k.ldapUsername) + v.Set("LDAPPassword", k.ldapPassword) + + u.RawQuery = v.Encode() + + req, kerr := http.NewRequest(http.MethodPost, u.String(), nil) + if kerr != nil { + err = kerr + return + } + + resp, kerr := clnt.Do(req) + if kerr != nil { + err = kerr + return + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + err = errors.New(resp.Status) + return + } + + r := AssumeRoleWithLDAPResponse{} + if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil { + return + } + + cr := r.Result.Credentials + k.SetExpiration(cr.Expiration, DefaultExpiryWindow) + return Value{ + AccessKeyID: cr.AccessKey, + SecretAccessKey: cr.SecretKey, + SessionToken: cr.SessionToken, + SignerType: SignatureV4, + }, nil +} diff --git a/pkg/credentials/sts_web_identity.go b/pkg/credentials/sts_web_identity.go index 81a488711..5a5f6405e 100644 --- a/pkg/credentials/sts_web_identity.go +++ b/pkg/credentials/sts_web_identity.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -23,6 +23,7 @@ import ( "fmt" "net/http" "net/url" + "strconv" "time" ) @@ -36,7 +37,7 @@ type AssumeRoleWithWebIdentityResponse struct { } // WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity -// request, including temporary credentials that can be used to make Minio API requests. +// request, including temporary credentials that can be used to make MinIO API requests. type WebIdentityResult struct { AssumedRoleUser AssumedRoleUser `xml:",omitempty"` Audience string `xml:",omitempty"` @@ -57,15 +58,15 @@ type WebIdentityToken struct { Expiry int } -// A STSWebIdentity retrieves credentials from Minio service, and keeps track if +// A STSWebIdentity retrieves credentials from MinIO service, and keeps track if // those credentials are expired. type STSWebIdentity struct { Expiry - // Required http Client to use when connecting to Minio STS service. + // Required http Client to use when connecting to MinIO STS service. Client *http.Client - // Minio endpoint to fetch STS credentials. + // MinIO endpoint to fetch STS credentials. stsEndpoint string // getWebIDTokenExpiry function which returns ID tokens @@ -75,6 +76,13 @@ type STSWebIdentity struct { // this token. // This is a customer provided function and is mandatory. getWebIDTokenExpiry func() (*WebIdentityToken, error) + + // roleARN is the Amazon Resource Name (ARN) of the role that the caller is + // assuming. + roleARN string + + // roleSessionName is the identifier for the assumed role session. 
+ roleSessionName string } // NewSTSWebIdentity returns a pointer to a new @@ -95,7 +103,7 @@ func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdent }), nil } -func getWebIdentityCredentials(clnt *http.Client, endpoint string, +func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (AssumeRoleWithWebIdentityResponse, error) { idToken, err := getWebIDTokenExpiry() if err != nil { @@ -104,8 +112,18 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint string, v := url.Values{} v.Set("Action", "AssumeRoleWithWebIdentity") + if len(roleARN) > 0 { + v.Set("RoleArn", roleARN) + + if len(roleSessionName) == 0 { + roleSessionName = strconv.FormatInt(time.Now().UnixNano(), 10) + } + v.Set("RoleSessionName", roleSessionName) + } v.Set("WebIdentityToken", idToken.Token) - v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry)) + if idToken.Expiry > 0 { + v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry)) + } v.Set("Version", "2011-06-15") u, err := url.Parse(endpoint) @@ -115,7 +133,7 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint string, u.RawQuery = v.Encode() - req, err := http.NewRequest("POST", u.String(), nil) + req, err := http.NewRequest(http.MethodPost, u.String(), nil) if err != nil { return AssumeRoleWithWebIdentityResponse{}, err } @@ -138,10 +156,10 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint string, return a, nil } -// Retrieve retrieves credentials from the Minio service. +// Retrieve retrieves credentials from the MinIO service. // Error will be returned if the request fails. 
func (m *STSWebIdentity) Retrieve() (Value, error) { - a, err := getWebIdentityCredentials(m.Client, m.stsEndpoint, m.getWebIDTokenExpiry) + a, err := getWebIdentityCredentials(m.Client, m.stsEndpoint, m.roleARN, m.roleSessionName, m.getWebIDTokenExpiry) if err != nil { return Value{}, err } @@ -156,3 +174,8 @@ func (m *STSWebIdentity) Retrieve() (Value, error) { SignerType: SignatureV4, }, nil } + +// Expiration returns the expiration time of the credentials +func (m *STSWebIdentity) Expiration() time.Time { + return m.expiration +} diff --git a/pkg/encrypt/server-side.go b/pkg/encrypt/server-side.go index 2d3c70f00..5276f63fc 100644 --- a/pkg/encrypt/server-side.go +++ b/pkg/encrypt/server-side.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2018 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -20,10 +20,10 @@ package encrypt import ( "crypto/md5" "encoding/base64" - "encoding/json" "errors" "net/http" + jsoniter "github.com/json-iterator/go" "golang.org/x/crypto/argon2" ) @@ -101,6 +101,7 @@ func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) { if context == nil { return kms{key: keyID, hasContext: false}, nil } + var json = jsoniter.ConfigCompatibleWithStandardLibrary serializedContext, err := json.Marshal(context) if err != nil { return nil, err @@ -188,7 +189,9 @@ func (s kms) Type() Type { return KMS } func (s kms) Marshal(h http.Header) { h.Set(sseGenericHeader, "aws:kms") - h.Set(sseKmsKeyID, s.key) + if s.key != "" { + h.Set(sseKmsKeyID, s.key) + } if s.hasContext { h.Set(sseEncryptionContext, base64.StdEncoding.EncodeToString(s.context)) } diff --git a/pkg/lifecycle/lifecycle.go b/pkg/lifecycle/lifecycle.go new file mode 100644 index 000000000..3493003f7 --- /dev/null +++ b/pkg/lifecycle/lifecycle.go @@ -0,0 +1,282 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package lifecycle contains all the lifecycle related data types and marshallers. 
+package lifecycle + +import ( + "encoding/xml" + "time" +) + +// AbortIncompleteMultipartUpload structure, not supported yet on MinIO +type AbortIncompleteMultipartUpload struct { + XMLName xml.Name `xml:"AbortIncompleteMultipartUpload,omitempty" json:"-"` + DaysAfterInitiation ExpirationDays `xml:"DaysAfterInitiation,omitempty" json:"DaysAfterInitiation,omitempty"` +} + +// IsDaysNull returns true if days field is null +func (n AbortIncompleteMultipartUpload) IsDaysNull() bool { + return n.DaysAfterInitiation == ExpirationDays(0) +} + +// MarshalXML if days after initiation is set to non-zero value +func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if n.IsDaysNull() { + return nil + } + type abortIncompleteMultipartUploadWrapper AbortIncompleteMultipartUpload + return e.EncodeElement(abortIncompleteMultipartUploadWrapper(n), start) +} + +// NoncurrentVersionExpiration - Specifies when noncurrent object versions expire. +// Upon expiration, server permanently deletes the noncurrent object versions. +// Set this lifecycle configuration action on a bucket that has versioning enabled +// (or suspended) to request server delete noncurrent object versions at a +// specific period in the object's lifetime. +type NoncurrentVersionExpiration struct { + XMLName xml.Name `xml:"NoncurrentVersionExpiration" json:"-"` + NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty"` +} + +// NoncurrentVersionTransition structure, set this action to request server to +// transition noncurrent object versions to different set storage classes +// at a specific period in the object's lifetime. 
+type NoncurrentVersionTransition struct { + XMLName xml.Name `xml:"NoncurrentVersionTransition,omitempty" json:"-"` + StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` + NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty" json:"NoncurrentDays,omitempty"` +} + +// MarshalXML if non-current days not set to non zero value +func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if n.IsDaysNull() { + return nil + } + type noncurrentVersionExpirationWrapper NoncurrentVersionExpiration + return e.EncodeElement(noncurrentVersionExpirationWrapper(n), start) +} + +// IsDaysNull returns true if days field is null +func (n NoncurrentVersionExpiration) IsDaysNull() bool { + return n.NoncurrentDays == ExpirationDays(0) +} + +// MarshalXML is extended to leave out +// tags +func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if n.NoncurrentDays == ExpirationDays(0) { + return nil + } + return e.EncodeElement(&n, start) +} + +// Tag structure key/value pair representing an object tag to apply lifecycle configuration +type Tag struct { + XMLName xml.Name `xml:"Tag,omitempty" json:"-"` + Key string `xml:"Key,omitempty" json:"Key,omitempty"` + Value string `xml:"Value,omitempty" json:"Value,omitempty"` +} + +// IsEmpty returns whether this tag is empty or not. 
+func (tag Tag) IsEmpty() bool { + return tag.Key == "" +} + +// Transition structure - transition details of lifecycle configuration +type Transition struct { + XMLName xml.Name `xml:"Transition" json:"-"` + Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"` + StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` + Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"` +} + +// IsDaysNull returns true if days field is null +func (t Transition) IsDaysNull() bool { + return t.Days == ExpirationDays(0) +} + +// IsDateNull returns true if date field is null +func (t Transition) IsDateNull() bool { + return t.Date.Time.IsZero() +} + +// IsNull returns true if both date and days fields are null +func (t Transition) IsNull() bool { + return t.IsDaysNull() && t.IsDateNull() +} + +// MarshalXML is transition is non null +func (t Transition) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error { + if t.IsNull() { + return nil + } + type transitionWrapper Transition + return en.EncodeElement(transitionWrapper(t), startElement) +} + +// And And Rule for LifecycleTag, to be used in LifecycleRuleFilter +type And struct { + XMLName xml.Name `xml:"And,omitempty" json:"-"` + Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` + Tags []Tag `xml:"Tag,omitempty" json:"Tags,omitempty"` +} + +// IsEmpty returns true if Tags field is null +func (a And) IsEmpty() bool { + return len(a.Tags) == 0 && a.Prefix == "" +} + +// Filter will be used in selecting rule(s) for lifecycle configuration +type Filter struct { + XMLName xml.Name `xml:"Filter" json:"-"` + And And `xml:"And,omitempty" json:"And,omitempty"` + Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` + Tag Tag `xml:"Tag,omitempty" json:"-"` +} + +// MarshalXML - produces the xml representation of the Filter struct +// only one of Prefix, And and Tag should be present in the output. 
+func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + + switch { + case !f.And.IsEmpty(): + if err := e.EncodeElement(f.And, xml.StartElement{Name: xml.Name{Local: "And"}}); err != nil { + return err + } + case !f.Tag.IsEmpty(): + if err := e.EncodeElement(f.Tag, xml.StartElement{Name: xml.Name{Local: "Tag"}}); err != nil { + return err + } + default: + // Always print Prefix field when both And & Tag are empty + if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// ExpirationDays is a type alias to unmarshal Days in Expiration +type ExpirationDays int + +// MarshalXML encodes number of days to expire if it is non-zero and +// encodes empty string otherwise +func (eDays ExpirationDays) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { + if eDays == 0 { + return nil + } + return e.EncodeElement(int(eDays), startElement) +} + +// ExpirationDate is a embedded type containing time.Time to unmarshal +// Date in Expiration +type ExpirationDate struct { + time.Time +} + +// MarshalXML encodes expiration date if it is non-zero and encodes +// empty string otherwise +func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { + if eDate.Time.IsZero() { + return nil + } + return e.EncodeElement(eDate.Format(time.RFC3339), startElement) +} + +// ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element. +type ExpireDeleteMarker bool + +// MarshalXML encodes delete marker boolean into an XML form. 
+func (b ExpireDeleteMarker) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { + if !b { + return nil + } + type expireDeleteMarkerWrapper ExpireDeleteMarker + return e.EncodeElement(expireDeleteMarkerWrapper(b), startElement) +} + +// Expiration structure - expiration details of lifecycle configuration +type Expiration struct { + XMLName xml.Name `xml:"Expiration,omitempty" json:"-"` + Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"` + Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"` + DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty"` +} + +// IsDaysNull returns true if days field is null +func (e Expiration) IsDaysNull() bool { + return e.Days == ExpirationDays(0) +} + +// IsDateNull returns true if date field is null +func (e Expiration) IsDateNull() bool { + return e.Date.Time.IsZero() +} + +// IsNull returns true if both date and days fields are null +func (e Expiration) IsNull() bool { + return e.IsDaysNull() && e.IsDateNull() +} + +// MarshalXML is expiration is non null +func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error { + if e.IsNull() { + return nil + } + type expirationWrapper Expiration + return en.EncodeElement(expirationWrapper(e), startElement) +} + +// Rule represents a single rule in lifecycle configuration +type Rule struct { + XMLName xml.Name `xml:"Rule,omitempty" json:"-"` + AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"` + Expiration Expiration `xml:"Expiration,omitempty" json:"Expiration,omitempty"` + ID string `xml:"ID" json:"ID"` + RuleFilter Filter `xml:"Filter,omitempty" json:"Filter,omitempty"` + NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"` + NoncurrentVersionTransition NoncurrentVersionTransition 
`xml:"NoncurrentVersionTransition,omitempty" json:"NoncurrentVersionTransition,omitempty"`
+	Prefix                         string                         `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
+	Status                         string                         `xml:"Status" json:"Status"`
+	Transition                     Transition                     `xml:"Transition,omitempty" json:"Transition,omitempty"`
+}
+
+// Configuration is a collection of Rule objects.
+type Configuration struct {
+	XMLName xml.Name `xml:"LifecycleConfiguration,omitempty" json:"-"`
+	Rules   []Rule   `xml:"Rule"`
+}
+
+// Empty checks if lifecycle configuration is empty
+func (c *Configuration) Empty() bool {
+	if c == nil {
+		return true
+	}
+	return len(c.Rules) == 0
+}
+
+// NewConfiguration initializes a fresh lifecycle configuration
+// for manipulation, such as setting and removing lifecycle rules
+// and filters.
+func NewConfiguration() *Configuration {
+	return &Configuration{}
+}
diff --git a/pkg/notification/info.go b/pkg/notification/info.go
new file mode 100644
index 000000000..d0a471638
--- /dev/null
+++ b/pkg/notification/info.go
@@ -0,0 +1,78 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package notification
+
+// Identity represents the user id, this is a compliance field.
+type identity struct {
+	PrincipalID string `json:"principalId"`
+}
+
+// event bucket metadata.
+type bucketMeta struct {
+	Name          string   `json:"name"`
+	OwnerIdentity identity `json:"ownerIdentity"`
+	ARN           string   `json:"arn"`
+}
+
+// event object metadata.
+type objectMeta struct {
+	Key          string            `json:"key"`
+	Size         int64             `json:"size,omitempty"`
+	ETag         string            `json:"eTag,omitempty"`
+	ContentType  string            `json:"contentType,omitempty"`
+	UserMetadata map[string]string `json:"userMetadata,omitempty"`
+	VersionID    string            `json:"versionId,omitempty"`
+	Sequencer    string            `json:"sequencer"`
+}
+
+// event server specific metadata.
+type eventMeta struct {
+	SchemaVersion   string     `json:"s3SchemaVersion"`
+	ConfigurationID string     `json:"configurationId"`
+	Bucket          bucketMeta `json:"bucket"`
+	Object          objectMeta `json:"object"`
+}
+
+// sourceInfo represents information on the client that
+// triggered the event notification.
+type sourceInfo struct {
+	Host      string `json:"host"`
+	Port      string `json:"port"`
+	UserAgent string `json:"userAgent"`
+}
+
+// Event represents an Amazon S3 bucket notification event.
+type Event struct {
+	EventVersion      string            `json:"eventVersion"`
+	EventSource       string            `json:"eventSource"`
+	AwsRegion         string            `json:"awsRegion"`
+	EventTime         string            `json:"eventTime"`
+	EventName         string            `json:"eventName"`
+	UserIdentity      identity          `json:"userIdentity"`
+	RequestParameters map[string]string `json:"requestParameters"`
+	ResponseElements  map[string]string `json:"responseElements"`
+	S3                eventMeta         `json:"s3"`
+	Source            sourceInfo        `json:"source"`
+}
+
+// Info - represents the collection of notification events, additionally
+// also reports errors if any while listening on bucket notifications.
+type Info struct { + Records []Event + Err error +} diff --git a/bucket-notification.go b/pkg/notification/notification.go similarity index 51% rename from bucket-notification.go rename to pkg/notification/notification.go index ea303dd9d..55c58cb3b 100644 --- a/bucket-notification.go +++ b/pkg/notification/notification.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,32 +15,34 @@ * limitations under the License. */ -package minio +package notification import ( "encoding/xml" + "errors" + "fmt" - "github.com/minio/minio-go/pkg/set" + "github.com/minio/minio-go/v7/pkg/set" ) -// NotificationEventType is a S3 notification event associated to the bucket notification configuration -type NotificationEventType string +// EventType is a S3 notification event associated to the bucket notification configuration +type EventType string // The role of all event types are described in : // http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations const ( - ObjectCreatedAll NotificationEventType = "s3:ObjectCreated:*" - ObjectCreatedPut = "s3:ObjectCreated:Put" - ObjectCreatedPost = "s3:ObjectCreated:Post" - ObjectCreatedCopy = "s3:ObjectCreated:Copy" - ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" - ObjectAccessedGet = "s3:ObjectAccessed:Get" - ObjectAccessedHead = "s3:ObjectAccessed:Head" - ObjectAccessedAll = "s3:ObjectAccessed:*" - ObjectRemovedAll = "s3:ObjectRemoved:*" - ObjectRemovedDelete = "s3:ObjectRemoved:Delete" - ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" - ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" + ObjectCreatedAll 
EventType = "s3:ObjectCreated:*" + ObjectCreatedPut = "s3:ObjectCreated:Put" + ObjectCreatedPost = "s3:ObjectCreated:Post" + ObjectCreatedCopy = "s3:ObjectCreated:Copy" + ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" + ObjectAccessedGet = "s3:ObjectAccessed:Get" + ObjectAccessedHead = "s3:ObjectAccessed:Head" + ObjectAccessedAll = "s3:ObjectAccessed:*" + ObjectRemovedAll = "s3:ObjectRemoved:*" + ObjectRemovedDelete = "s3:ObjectRemoved:Delete" + ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" + ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" ) // FilterRule - child of S3Key, a tag in the notification xml which @@ -81,32 +83,32 @@ func NewArn(partition, service, region, accountID, resource string) Arn { Resource: resource} } -// Return the string format of the ARN +// String returns the string format of the ARN func (arn Arn) String() string { return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource } -// NotificationConfig - represents one single notification configuration +// Config - represents one single notification configuration // such as topic, queue or lambda configuration. 
-type NotificationConfig struct { - ID string `xml:"Id,omitempty"` - Arn Arn `xml:"-"` - Events []NotificationEventType `xml:"Event"` - Filter *Filter `xml:"Filter,omitempty"` +type Config struct { + ID string `xml:"Id,omitempty"` + Arn Arn `xml:"-"` + Events []EventType `xml:"Event"` + Filter *Filter `xml:"Filter,omitempty"` } -// NewNotificationConfig creates one notification config and sets the given ARN -func NewNotificationConfig(arn Arn) NotificationConfig { - return NotificationConfig{Arn: arn, Filter: &Filter{}} +// NewConfig creates one notification config and sets the given ARN +func NewConfig(arn Arn) Config { + return Config{Arn: arn, Filter: &Filter{}} } // AddEvents adds one event to the current notification config -func (t *NotificationConfig) AddEvents(events ...NotificationEventType) { +func (t *Config) AddEvents(events ...EventType) { t.Events = append(t.Events, events...) } // AddFilterSuffix sets the suffix configuration to the current notification config -func (t *NotificationConfig) AddFilterSuffix(suffix string) { +func (t *Config) AddFilterSuffix(suffix string) { if t.Filter == nil { t.Filter = &Filter{} } @@ -122,7 +124,7 @@ func (t *NotificationConfig) AddFilterSuffix(suffix string) { } // AddFilterPrefix sets the prefix configuration to the current notification config -func (t *NotificationConfig) AddFilterPrefix(prefix string) { +func (t *Config) AddFilterPrefix(prefix string) { if t.Filter == nil { t.Filter = &Filter{} } @@ -137,26 +139,82 @@ func (t *NotificationConfig) AddFilterPrefix(prefix string) { t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) } +// EqualEventTypeList tells whether a and b contain the same events +func EqualEventTypeList(a, b []EventType) bool { + if len(a) != len(b) { + return false + } + setA := set.NewStringSet() + for _, i := range a { + setA.Add(string(i)) + } + + setB := set.NewStringSet() + for _, i := range b { + setB.Add(string(i)) + } + + return 
setA.Difference(setB).IsEmpty() +} + +// EqualFilterRuleList tells whether a and b contain the same filters +func EqualFilterRuleList(a, b []FilterRule) bool { + if len(a) != len(b) { + return false + } + + setA := set.NewStringSet() + for _, i := range a { + setA.Add(fmt.Sprintf("%s-%s", i.Name, i.Value)) + } + + setB := set.NewStringSet() + for _, i := range b { + setB.Add(fmt.Sprintf("%s-%s", i.Name, i.Value)) + } + + return setA.Difference(setB).IsEmpty() +} + +// Equal returns whether this `Config` is equal to another defined by the passed parameters +func (t *Config) Equal(events []EventType, prefix, suffix string) bool { + //Compare events + passEvents := EqualEventTypeList(t.Events, events) + + //Compare filters + var newFilter []FilterRule + if prefix != "" { + newFilter = append(newFilter, FilterRule{Name: "prefix", Value: prefix}) + } + if suffix != "" { + newFilter = append(newFilter, FilterRule{Name: "suffix", Value: suffix}) + } + + passFilters := EqualFilterRuleList(t.Filter.S3Key.FilterRules, newFilter) + // if it matches events and filters, mark the index for deletion + return passEvents && passFilters +} + // TopicConfig carries one single topic notification configuration type TopicConfig struct { - NotificationConfig + Config Topic string `xml:"Topic"` } // QueueConfig carries one single queue notification configuration type QueueConfig struct { - NotificationConfig + Config Queue string `xml:"Queue"` } // LambdaConfig carries one single cloudfunction notification configuration type LambdaConfig struct { - NotificationConfig + Config Lambda string `xml:"CloudFunction"` } -// BucketNotification - the struct that represents the whole XML to be sent to the web service -type BucketNotification struct { +// Configuration - the struct that represents the whole XML to be sent to the web service +type Configuration struct { XMLName xml.Name `xml:"NotificationConfiguration"` LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"` TopicConfigs 
[]TopicConfig `xml:"TopicConfiguration"` @@ -164,8 +222,8 @@ type BucketNotification struct { } // AddTopic adds a given topic config to the general bucket notification config -func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) bool { - newTopicConfig := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()} +func (b *Configuration) AddTopic(topicConfig Config) bool { + newTopicConfig := TopicConfig{Config: topicConfig, Topic: topicConfig.Arn.String()} for _, n := range b.TopicConfigs { // If new config matches existing one if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter { @@ -190,8 +248,8 @@ func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) bool { } // AddQueue adds a given queue config to the general bucket notification config -func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) bool { - newQueueConfig := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()} +func (b *Configuration) AddQueue(queueConfig Config) bool { + newQueueConfig := QueueConfig{Config: queueConfig, Queue: queueConfig.Arn.String()} for _, n := range b.QueueConfigs { if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter { @@ -215,8 +273,8 @@ func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) bool { } // AddLambda adds a given lambda config to the general bucket notification config -func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) bool { - newLambdaConfig := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()} +func (b *Configuration) AddLambda(lambdaConfig Config) bool { + newLambdaConfig := LambdaConfig{Config: lambdaConfig, Lambda: lambdaConfig.Arn.String()} for _, n := range b.LambdaConfigs { if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter { @@ -240,7 +298,7 @@ func (b *BucketNotification) AddLambda(lambdaConfig 
NotificationConfig) bool { } // RemoveTopicByArn removes all topic configurations that match the exact specified ARN -func (b *BucketNotification) RemoveTopicByArn(arn Arn) { +func (b *Configuration) RemoveTopicByArn(arn Arn) { var topics []TopicConfig for _, topic := range b.TopicConfigs { if topic.Topic != arn.String() { @@ -250,8 +308,28 @@ func (b *BucketNotification) RemoveTopicByArn(arn Arn) { b.TopicConfigs = topics } +// ErrNoConfigMatch is returned when a notification configuration (sqs,sns,lambda) is not found when trying to delete +var ErrNoConfigMatch = errors.New("no notification configuration matched") + +// RemoveTopicByArnEventsPrefixSuffix removes a topic configuration that match the exact specified ARN, events, prefix and suffix +func (b *Configuration) RemoveTopicByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { + removeIndex := -1 + for i, v := range b.TopicConfigs { + // if it matches events and filters, mark the index for deletion + if v.Topic == arn.String() && v.Config.Equal(events, prefix, suffix) { + removeIndex = i + break // since we have at most one matching config + } + } + if removeIndex >= 0 { + b.TopicConfigs = append(b.TopicConfigs[:removeIndex], b.TopicConfigs[removeIndex+1:]...) 
+ return nil + } + return ErrNoConfigMatch +} + // RemoveQueueByArn removes all queue configurations that match the exact specified ARN -func (b *BucketNotification) RemoveQueueByArn(arn Arn) { +func (b *Configuration) RemoveQueueByArn(arn Arn) { var queues []QueueConfig for _, queue := range b.QueueConfigs { if queue.Queue != arn.String() { @@ -261,8 +339,25 @@ func (b *BucketNotification) RemoveQueueByArn(arn Arn) { b.QueueConfigs = queues } +// RemoveQueueByArnEventsPrefixSuffix removes a queue configuration that match the exact specified ARN, events, prefix and suffix +func (b *Configuration) RemoveQueueByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { + removeIndex := -1 + for i, v := range b.QueueConfigs { + // if it matches events and filters, mark the index for deletion + if v.Queue == arn.String() && v.Config.Equal(events, prefix, suffix) { + removeIndex = i + break // since we have at most one matching config + } + } + if removeIndex >= 0 { + b.QueueConfigs = append(b.QueueConfigs[:removeIndex], b.QueueConfigs[removeIndex+1:]...) 
+ return nil + } + return ErrNoConfigMatch +} + // RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN -func (b *BucketNotification) RemoveLambdaByArn(arn Arn) { +func (b *Configuration) RemoveLambdaByArn(arn Arn) { var lambdas []LambdaConfig for _, lambda := range b.LambdaConfigs { if lambda.Lambda != arn.String() { @@ -271,3 +366,20 @@ func (b *BucketNotification) RemoveLambdaByArn(arn Arn) { } b.LambdaConfigs = lambdas } + +// RemoveLambdaByArnEventsPrefixSuffix removes a topic configuration that match the exact specified ARN, events, prefix and suffix +func (b *Configuration) RemoveLambdaByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { + removeIndex := -1 + for i, v := range b.LambdaConfigs { + // if it matches events and filters, mark the index for deletion + if v.Lambda == arn.String() && v.Config.Equal(events, prefix, suffix) { + removeIndex = i + break // since we have at most one matching config + } + } + if removeIndex >= 0 { + b.LambdaConfigs = append(b.LambdaConfigs[:removeIndex], b.LambdaConfigs[removeIndex+1:]...) + return nil + } + return ErrNoConfigMatch +} diff --git a/pkg/notification/notification_test.go b/pkg/notification/notification_test.go new file mode 100644 index 000000000..ac0db754d --- /dev/null +++ b/pkg/notification/notification_test.go @@ -0,0 +1,842 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package notification + +import ( + "encoding/xml" + "testing" +) + +func TestEqualEventTypeList(t *testing.T) { + type args struct { + a []EventType + b []EventType + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "same order", + args: args{ + a: []EventType{ObjectCreatedAll, ObjectAccessedAll}, + b: []EventType{ObjectCreatedAll, ObjectAccessedAll}, + }, + want: true, + }, + { + name: "different order", + args: args{ + a: []EventType{ObjectCreatedAll, ObjectAccessedAll}, + b: []EventType{ObjectAccessedAll, ObjectCreatedAll}, + }, + want: true, + }, + { + name: "not equal", + args: args{ + a: []EventType{ObjectCreatedAll, ObjectAccessedAll}, + b: []EventType{ObjectRemovedAll, ObjectAccessedAll}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := EqualEventTypeList(tt.args.a, tt.args.b); got != tt.want { + t.Errorf("EqualEventTypeList() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestEqualFilterRuleList(t *testing.T) { + type args struct { + a []FilterRule + b []FilterRule + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "same order", + args: args{ + a: []FilterRule{{Name: "prefix", Value: "prefix1"}, {Name: "suffix", Value: "suffix1"}}, + b: []FilterRule{{Name: "prefix", Value: "prefix1"}, {Name: "suffix", Value: "suffix1"}}, + }, + want: true, + }, + { + name: "different order", + args: args{ + a: []FilterRule{{Name: "prefix", Value: "prefix1"}, {Name: "suffix", Value: "suffix1"}}, + b: []FilterRule{{Name: "suffix", Value: "suffix1"}, {Name: "prefix", Value: "prefix1"}}, + }, + want: true, + }, + { + name: "not equal", + args: args{ + a: []FilterRule{{Name: "prefix", Value: "prefix1"}, {Name: "suffix", Value: "suffix1"}}, + b: []FilterRule{{Name: "prefix", Value: "prefix2"}, {Name: "suffix", Value: "suffix1"}}, + }, + }, + } + for _, tt 
:= range tests { + t.Run(tt.name, func(t *testing.T) { + if got := EqualFilterRuleList(tt.args.a, tt.args.b); got != tt.want { + t.Errorf("EqualFilterRuleList() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestConfig_Equal(t *testing.T) { + type fields struct { + ID string + Arn Arn + Events []EventType + Filter *Filter + } + type args struct { + events []EventType + prefix string + suffix string + } + tests := []struct { + name string + fields fields + args args + want bool + }{ + { + name: "same order", + fields: fields{ + Arn: NewArn("minio", "sqs", "", "1", "postgresql"), + Events: []EventType{ObjectCreatedAll, ObjectAccessedAll}, + Filter: &Filter{ + S3Key: S3Key{ + FilterRules: []FilterRule{{Name: "prefix", Value: "prefix1"}, {Name: "suffix", Value: "suffix1"}}, + }, + }, + }, + args: args{ + events: []EventType{ObjectCreatedAll, ObjectAccessedAll}, + prefix: "prefix1", + suffix: "suffix1", + }, + want: true, + }, + { + name: "different order", + fields: fields{ + Arn: NewArn("minio", "sqs", "", "1", "postgresql"), + Events: []EventType{ObjectAccessedAll, ObjectCreatedAll}, + Filter: &Filter{ + S3Key: S3Key{ + FilterRules: []FilterRule{{Name: "suffix", Value: "suffix1"}, {Name: "prefix", Value: "prefix1"}}, + }, + }, + }, + args: args{ + events: []EventType{ObjectCreatedAll, ObjectAccessedAll}, + prefix: "prefix1", + suffix: "suffix1", + }, + want: true, + }, + { + name: "not equal", + fields: fields{ + Arn: NewArn("minio", "sqs", "", "1", "postgresql"), + Events: []EventType{ObjectAccessedAll}, + Filter: &Filter{ + S3Key: S3Key{ + FilterRules: []FilterRule{{Name: "suffix", Value: "suffix1"}, {Name: "prefix", Value: "prefix1"}}, + }, + }, + }, + args: args{ + events: []EventType{ObjectCreatedAll, ObjectAccessedAll}, + prefix: "prefix1", + suffix: "suffix1", + }, + want: false, + }, + { + name: "different arn", + fields: fields{ + Events: []EventType{ObjectAccessedAll}, + Filter: &Filter{ + S3Key: S3Key{ + FilterRules: []FilterRule{{Name: "suffix", 
Value: "suffix1"}, {Name: "prefix", Value: "prefix1"}}, + }, + }, + }, + args: args{ + events: []EventType{ObjectCreatedAll, ObjectAccessedAll}, + prefix: "prefix1", + suffix: "suffix1", + }, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + nc := &Config{ + ID: tt.fields.ID, + Arn: tt.fields.Arn, + Events: tt.fields.Events, + Filter: tt.fields.Filter, + } + if got := nc.Equal(tt.args.events, tt.args.prefix, tt.args.suffix); got != tt.want { + t.Errorf("Equal() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestConfiguration_RemoveQueueByArnEventsPrefixSuffix(t *testing.T) { + type fields struct { + XMLName xml.Name + LambdaConfigs []LambdaConfig + TopicConfigs []TopicConfig + QueueConfigs []QueueConfig + } + type args struct { + arn Arn + events []EventType + prefix string + suffix string + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + { + name: "Queue Configuration Removed with events, prefix", + fields: fields{ + XMLName: xml.Name{}, + LambdaConfigs: nil, + TopicConfigs: nil, + QueueConfigs: []QueueConfig{ + { + Config: Config{ + ID: "", + Arn: Arn{ + Partition: "minio", + Service: "sqs", + Region: "", + AccountID: "1", + Resource: "postgresql", + }, + Events: []EventType{ + ObjectAccessedAll, + }, + Filter: &Filter{ + S3Key: S3Key{ + FilterRules: []FilterRule{ + { + Name: "prefix", + Value: "x", + }, + }, + }, + }, + }, + Queue: "arn:minio:sqs::1:postgresql", + }, + }, + }, + args: args{ + arn: Arn{ + Partition: "minio", + Service: "sqs", + Region: "", + AccountID: "1", + Resource: "postgresql", + }, + events: []EventType{ + ObjectAccessedAll, + }, + prefix: "x", + suffix: "", + }, + wantErr: false, + }, + { + name: "Queue Configuration Removed with events, prefix, suffix", + fields: fields{ + XMLName: xml.Name{}, + LambdaConfigs: nil, + TopicConfigs: nil, + QueueConfigs: []QueueConfig{ + { + Config: Config{ + ID: "", + Arn: Arn{ + Partition: "minio", + Service: "sqs", + 
Region: "", + AccountID: "1", + Resource: "postgresql", + }, + Events: []EventType{ + ObjectAccessedAll, + }, + Filter: &Filter{ + S3Key: S3Key{ + FilterRules: []FilterRule{ + { + Name: "prefix", + Value: "x", + }, + { + Name: "suffix", + Value: "y", + }, + }, + }, + }, + }, + Queue: "arn:minio:sqs::1:postgresql", + }, + }, + }, + args: args{ + arn: Arn{ + Partition: "minio", + Service: "sqs", + Region: "", + AccountID: "1", + Resource: "postgresql", + }, + events: []EventType{ + ObjectAccessedAll, + }, + prefix: "x", + suffix: "y", + }, + wantErr: false, + }, + { + name: "Error Returned Queue Configuration Not Removed", + fields: fields{ + XMLName: xml.Name{}, + LambdaConfigs: nil, + TopicConfigs: nil, + QueueConfigs: []QueueConfig{ + { + Config: Config{ + ID: "", + Arn: Arn{ + Partition: "minio", + Service: "sqs", + Region: "", + AccountID: "1", + Resource: "postgresql", + }, + Events: []EventType{ + ObjectAccessedAll, + }, + Filter: &Filter{ + S3Key: S3Key{ + FilterRules: []FilterRule{ + { + Name: "prefix", + Value: "x", + }, + }, + }, + }, + }, + Queue: "arn:minio:sqs::1:postgresql", + }, + }, + }, + args: args{ + arn: Arn{ + Partition: "minio", + Service: "sqs", + Region: "", + AccountID: "1", + Resource: "postgresql", + }, + events: []EventType{ + ObjectAccessedAll, + }, + prefix: "", + suffix: "", + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := &Configuration{ + XMLName: tt.fields.XMLName, + LambdaConfigs: tt.fields.LambdaConfigs, + TopicConfigs: tt.fields.TopicConfigs, + QueueConfigs: tt.fields.QueueConfigs, + } + if err := b.RemoveQueueByArnEventsPrefixSuffix(tt.args.arn, tt.args.events, tt.args.prefix, tt.args.suffix); (err != nil) != tt.wantErr { + t.Errorf("RemoveQueueByArnEventsPrefixSuffix() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestConfiguration_RemoveLambdaByArnEventsPrefixSuffix(t *testing.T) { + type fields struct { + XMLName xml.Name + LambdaConfigs 
[]LambdaConfig + TopicConfigs []TopicConfig + QueueConfigs []QueueConfig + } + type args struct { + arn Arn + events []EventType + prefix string + suffix string + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + { + name: "Lambda Configuration Removed with events, prefix", + fields: fields{ + XMLName: xml.Name{}, + QueueConfigs: nil, + TopicConfigs: nil, + LambdaConfigs: []LambdaConfig{ + { + Config: Config{ + ID: "", + Arn: Arn{ + Partition: "minio", + Service: "lambda", + Region: "", + AccountID: "1", + Resource: "provider", + }, + Events: []EventType{ + ObjectAccessedAll, + }, + Filter: &Filter{ + S3Key: S3Key{ + FilterRules: []FilterRule{ + { + Name: "prefix", + Value: "x", + }, + }, + }, + }, + }, + Lambda: "arn:minio:lambda::1:provider", + }, + }, + }, + args: args{ + arn: Arn{ + Partition: "minio", + Service: "lambda", + Region: "", + AccountID: "1", + Resource: "provider", + }, + events: []EventType{ + ObjectAccessedAll, + }, + prefix: "x", + suffix: "", + }, + wantErr: false, + }, + { + name: "Lambda Configuration Removed with events, prefix, suffix", + fields: fields{ + XMLName: xml.Name{}, + QueueConfigs: nil, + TopicConfigs: nil, + LambdaConfigs: []LambdaConfig{ + { + Config: Config{ + ID: "", + Arn: Arn{ + Partition: "minio", + Service: "lambda", + Region: "", + AccountID: "1", + Resource: "provider", + }, + Events: []EventType{ + ObjectAccessedAll, + }, + Filter: &Filter{ + S3Key: S3Key{ + FilterRules: []FilterRule{ + { + Name: "prefix", + Value: "x", + }, + { + Name: "suffix", + Value: "y", + }, + }, + }, + }, + }, + Lambda: "arn:minio:lambda::1:provider", + }, + }, + }, + args: args{ + arn: Arn{ + Partition: "minio", + Service: "lambda", + Region: "", + AccountID: "1", + Resource: "provider", + }, + events: []EventType{ + ObjectAccessedAll, + }, + prefix: "x", + suffix: "y", + }, + wantErr: false, + }, + { + name: "Error Returned Lambda Configuration Not Removed", + fields: fields{ + XMLName: xml.Name{}, + 
QueueConfigs: nil, + TopicConfigs: nil, + LambdaConfigs: []LambdaConfig{ + { + Config: Config{ + ID: "", + Arn: Arn{ + Partition: "minio", + Service: "lambda", + Region: "", + AccountID: "1", + Resource: "provider", + }, + Events: []EventType{ + ObjectAccessedAll, + }, + Filter: &Filter{ + S3Key: S3Key{ + FilterRules: []FilterRule{ + { + Name: "prefix", + Value: "x", + }, + }, + }, + }, + }, + Lambda: "arn:minio:lambda::1:provider", + }, + }, + }, + args: args{ + arn: Arn{ + Partition: "minio", + Service: "lambda", + Region: "", + AccountID: "1", + Resource: "provider", + }, + events: []EventType{ + ObjectAccessedAll, + }, + prefix: "", + suffix: "", + }, + wantErr: true, + }, + { + name: "Error Returned Invalid ARN", + fields: fields{ + XMLName: xml.Name{}, + QueueConfigs: nil, + TopicConfigs: nil, + LambdaConfigs: []LambdaConfig{ + { + Config: Config{ + ID: "", + Arn: Arn{ + Partition: "minio", + Service: "lambda", + Region: "", + AccountID: "1", + Resource: "provider", + }, + Events: []EventType{ + ObjectAccessedAll, + }, + Filter: &Filter{ + S3Key: S3Key{ + FilterRules: []FilterRule{ + { + Name: "prefix", + Value: "x", + }, + }, + }, + }, + }, + Lambda: "arn:minio:lambda::1:provider", + }, + }, + }, + args: args{ + arn: Arn{ + Partition: "minio", + + Service: "lambda", + Region: "", + AccountID: "2", + Resource: "provider", + }, + events: []EventType{ + ObjectAccessedAll, + }, + prefix: "", + suffix: "", + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := &Configuration{ + XMLName: tt.fields.XMLName, + LambdaConfigs: tt.fields.LambdaConfigs, + TopicConfigs: tt.fields.TopicConfigs, + QueueConfigs: tt.fields.QueueConfigs, + } + if err := b.RemoveLambdaByArnEventsPrefixSuffix(tt.args.arn, tt.args.events, tt.args.prefix, tt.args.suffix); (err != nil) != tt.wantErr { + t.Errorf("RemoveLambdaByArnEventsPrefixSuffix() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func 
TestConfiguration_RemoveTopicByArnEventsPrefixSuffix(t *testing.T) { + type fields struct { + XMLName xml.Name + LambdaConfigs []LambdaConfig + TopicConfigs []TopicConfig + QueueConfigs []QueueConfig + } + type args struct { + arn Arn + events []EventType + prefix string + suffix string + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + { + name: "Topic Configuration Removed with events, prefix", + fields: fields{ + XMLName: xml.Name{}, + QueueConfigs: nil, + LambdaConfigs: nil, + TopicConfigs: []TopicConfig{ + { + Config: Config{ + ID: "", + Arn: Arn{ + Partition: "minio", + Service: "sns", + Region: "", + AccountID: "1", + Resource: "kafka", + }, + Events: []EventType{ + ObjectAccessedAll, + }, + Filter: &Filter{ + S3Key: S3Key{ + FilterRules: []FilterRule{ + { + Name: "prefix", + Value: "x", + }, + }, + }, + }, + }, + Topic: "arn:minio:sns::1:kafka", + }, + }, + }, + args: args{ + arn: Arn{ + Partition: "minio", + Service: "sns", + Region: "", + AccountID: "1", + Resource: "kafka", + }, + events: []EventType{ + ObjectAccessedAll, + }, + prefix: "x", + suffix: "", + }, + wantErr: false, + }, + { + name: "Topic Configuration Removed with events, prefix, suffix", + fields: fields{ + XMLName: xml.Name{}, + QueueConfigs: nil, + LambdaConfigs: nil, + TopicConfigs: []TopicConfig{ + { + Config: Config{ + ID: "", + Arn: Arn{ + Partition: "minio", + Service: "sns", + Region: "", + AccountID: "1", + Resource: "kafka", + }, + Events: []EventType{ + ObjectAccessedAll, + }, + Filter: &Filter{ + S3Key: S3Key{ + FilterRules: []FilterRule{ + { + Name: "prefix", + Value: "x", + }, + { + Name: "suffix", + Value: "y", + }, + }, + }, + }, + }, + Topic: "arn:minio:sns::1:kafka", + }, + }, + }, + args: args{ + arn: Arn{ + Partition: "minio", + Service: "sns", + Region: "", + AccountID: "1", + Resource: "kafka", + }, + events: []EventType{ + ObjectAccessedAll, + }, + prefix: "x", + suffix: "y", + }, + wantErr: false, + }, + { + name: "Error 
Returned Topic Configuration Not Removed", + fields: fields{ + XMLName: xml.Name{}, + QueueConfigs: nil, + LambdaConfigs: nil, + TopicConfigs: []TopicConfig{ + { + Config: Config{ + ID: "", + Arn: Arn{ + Partition: "minio", + Service: "sns", + Region: "", + AccountID: "1", + Resource: "kafka", + }, + Events: []EventType{ + ObjectAccessedAll, + }, + Filter: &Filter{ + S3Key: S3Key{ + FilterRules: []FilterRule{ + { + Name: "prefix", + Value: "x", + }, + }, + }, + }, + }, + Topic: "arn:minio:sns::1:kafka", + }, + }, + }, + args: args{ + arn: Arn{ + Partition: "minio", + Service: "sns", + Region: "", + AccountID: "1", + Resource: "kafka", + }, + events: []EventType{ + ObjectAccessedAll, + }, + prefix: "", + suffix: "", + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := &Configuration{ + XMLName: tt.fields.XMLName, + LambdaConfigs: tt.fields.LambdaConfigs, + TopicConfigs: tt.fields.TopicConfigs, + QueueConfigs: tt.fields.QueueConfigs, + } + if err := b.RemoveTopicByArnEventsPrefixSuffix(tt.args.arn, tt.args.events, tt.args.prefix, tt.args.suffix); (err != nil) != tt.wantErr { + t.Errorf("RemoveTopicByArnEventsPrefixSuffix() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/pkg/policy/bucket-policy-condition.go b/pkg/policy/bucket-policy-condition.go index 737b810ac..04da742dc 100644 --- a/pkg/policy/bucket-policy-condition.go +++ b/pkg/policy/bucket-policy-condition.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,7 +17,7 @@ package policy -import "github.com/minio/minio-go/pkg/set" +import "github.com/minio/minio-go/v7/pkg/set" // ConditionKeyMap - map of policy condition key and value. 
type ConditionKeyMap map[string]set.StringSet @@ -46,9 +46,7 @@ func (ckm ConditionKeyMap) Remove(key string, value set.StringSet) { // RemoveKey - removes key and its value. func (ckm ConditionKeyMap) RemoveKey(key string) { - if _, ok := ckm[key]; ok { - delete(ckm, key) - } + delete(ckm, key) } // CopyConditionKeyMap - returns new copy of given ConditionKeyMap. @@ -91,9 +89,7 @@ func (cond ConditionMap) Add(condKey string, condKeyMap ConditionKeyMap) { // Remove - removes condition key and its value. func (cond ConditionMap) Remove(condKey string) { - if _, ok := cond[condKey]; ok { - delete(cond, condKey) - } + delete(cond, condKey) } // mergeConditionMap - returns new ConditionMap which contains merged key/value of two ConditionMap. diff --git a/pkg/policy/bucket-policy-condition_test.go b/pkg/policy/bucket-policy-condition_test.go index 9e4aa8fb6..1fc154868 100644 --- a/pkg/policy/bucket-policy-condition_test.go +++ b/pkg/policy/bucket-policy-condition_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,12 +18,14 @@ package policy import ( - "encoding/json" "testing" - "github.com/minio/minio-go/pkg/set" + jsoniter "github.com/json-iterator/go" + "github.com/minio/minio-go/v7/pkg/set" ) +var json = jsoniter.ConfigCompatibleWithStandardLibrary + // ConditionKeyMap.Add() is called and the result is validated. 
func TestConditionKeyMapAdd(t *testing.T) { condKeyMap := make(ConditionKeyMap) diff --git a/pkg/policy/bucket-policy.go b/pkg/policy/bucket-policy.go index 79fd8019a..bb8c51b07 100644 --- a/pkg/policy/bucket-policy.go +++ b/pkg/policy/bucket-policy.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,12 +18,12 @@ package policy import ( - "encoding/json" "errors" "reflect" "strings" - "github.com/minio/minio-go/pkg/set" + jsoniter "github.com/json-iterator/go" + "github.com/minio/minio-go/v7/pkg/set" ) // BucketPolicy - Bucket level policy. @@ -88,7 +88,8 @@ type User struct { // the reason is that Principal can take a json struct represented by // User string but it can also take a string. func (u *User) UnmarshalJSON(data []byte) error { - // Try to unmarshal data in a struct equal to User, we need it + // Try to unmarshal data in a struct equal to User, + var json = jsoniter.ConfigCompatibleWithStandardLibrary // to avoid infinite recursive call of this function type AliasUser User var au AliasUser diff --git a/pkg/policy/bucket-policy_test.go b/pkg/policy/bucket-policy_test.go index 1a71d8770..6848dbd01 100644 --- a/pkg/policy/bucket-policy_test.go +++ b/pkg/policy/bucket-policy_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -18,12 +18,11 @@ package policy import ( - "encoding/json" "fmt" "reflect" "testing" - "github.com/minio/minio-go/pkg/set" + "github.com/minio/minio-go/v7/pkg/set" ) // TestUnmarshalBucketPolicy tests unmarsheling various examples diff --git a/pkg/replication/replication.go b/pkg/replication/replication.go new file mode 100644 index 000000000..57e63a08c --- /dev/null +++ b/pkg/replication/replication.go @@ -0,0 +1,380 @@ +/* + * MinIO Client (C) 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package replication + +import ( + "bytes" + "encoding/xml" + "fmt" + "strconv" + "strings" + "unicode/utf8" + + "github.com/rs/xid" +) + +var errInvalidFilter = fmt.Errorf("Invalid filter") + +// OptionType specifies operation to be performed on config +type OptionType string + +const ( + // AddOption specifies addition of rule to config + AddOption OptionType = "Add" + // SetOption specifies modification of existing rule to config + SetOption OptionType = "Set" + + // RemoveOption specifies rule options are for removing a rule + RemoveOption OptionType = "Remove" + // ImportOption is for getting current config + ImportOption OptionType = "Import" +) + +// Options represents options to set a replication configuration rule +type Options struct { + Op OptionType + ID string + Prefix string + RuleStatus string + Priority string + TagString string + StorageClass string + Arn string +} + +// Tags returns a slice of tags for a rule +func (opts Options) Tags() []Tag { + var tagList []Tag + tagTokens := strings.Split(opts.TagString, "&") + for _, tok := range tagTokens { + if tok == "" { + break + } + kv := strings.SplitN(tok, "=", 2) + tagList = append(tagList, Tag{ + Key: kv[0], + Value: kv[1], + }) + } + return tagList +} + +// Config - replication configuration specified in +// https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html +type Config struct { + XMLName xml.Name `xml:"ReplicationConfiguration" json:"-"` + Rules []Rule `xml:"Rule" json:"Rules"` + Role string `xml:"Role" json:"Role"` +} + +// Empty returns true if config is not set +func (c *Config) Empty() bool { + return len(c.Rules) == 0 +} + +// AddRule adds a new rule to existing replication config. If a rule exists with the +// same ID, then the rule is replaced. +func (c *Config) AddRule(opts Options) error { + tags := opts.Tags() + andVal := And{ + Tags: opts.Tags(), + } + filter := Filter{Prefix: opts.Prefix} + // only a single tag is set. 
+ if opts.Prefix == "" && len(tags) == 1 { + filter.Tag = tags[0] + } + // both prefix and tag are present + if len(andVal.Tags) > 1 || opts.Prefix != "" { + filter.And = andVal + filter.And.Prefix = opts.Prefix + filter.Prefix = "" + } + if opts.ID == "" { + opts.ID = xid.New().String() + } + var status Status + // toggle rule status for edit option + switch opts.RuleStatus { + case "enable": + status = Enabled + case "disable": + status = Disabled + } + arnStr := opts.Arn + if opts.Arn == "" { + arnStr = c.Role + } + tokens := strings.Split(arnStr, ":") + if len(tokens) != 6 { + return fmt.Errorf("invalid format for replication Arn") + } + if c.Role == "" { // for new configurations + c.Role = opts.Arn + } + priority, err := strconv.Atoi(opts.Priority) + if err != nil { + return err + } + newRule := Rule{ + ID: opts.ID, + Priority: priority, + Status: status, + Filter: filter, + Destination: Destination{ + Bucket: fmt.Sprintf("arn:aws:s3:::%s", tokens[5]), + StorageClass: opts.StorageClass, + }, + DeleteMarkerReplication: DeleteMarkerReplication{Status: Disabled}, + } + + ruleFound := false + for i, rule := range c.Rules { + if rule.Priority == newRule.Priority && rule.ID != newRule.ID { + return fmt.Errorf("Priority must be unique. Replication configuration already has a rule with this priority") + } + if rule.Destination.Bucket != newRule.Destination.Bucket { + return fmt.Errorf("The destination bucket must be same for all rules") + } + if rule.ID != newRule.ID { + continue + } + if opts.Priority == "" && rule.ID == newRule.ID { + // inherit priority from existing rule, required field on server + newRule.Priority = rule.Priority + } + if opts.RuleStatus == "" { + newRule.Status = rule.Status + } + c.Rules[i] = newRule + ruleFound = true + break + } + // validate rule after overlaying priority for pre-existing rule being disabled. 
+ if err := newRule.Validate(); err != nil { + return err + } + if !ruleFound && opts.Op == SetOption { + return fmt.Errorf("Rule with ID %s not found in replication configuration", opts.ID) + } + if !ruleFound { + c.Rules = append(c.Rules, newRule) + } + return nil +} + +// RemoveRule removes a rule from replication config. +func (c *Config) RemoveRule(opts Options) error { + var newRules []Rule + for _, rule := range c.Rules { + if rule.ID != opts.ID { + newRules = append(newRules, rule) + } + } + + if len(newRules) == 0 { + return fmt.Errorf("Replication configuration should have at least one rule") + } + c.Rules = newRules + return nil + +} + +// Rule - a rule for replication configuration. +type Rule struct { + XMLName xml.Name `xml:"Rule" json:"-"` + ID string `xml:"ID,omitempty"` + Status Status `xml:"Status"` + Priority int `xml:"Priority"` + DeleteMarkerReplication DeleteMarkerReplication `xml:"DeleteMarkerReplication"` + Destination Destination `xml:"Destination"` + Filter Filter `xml:"Filter" json:"Filter"` +} + +// Validate validates the rule for correctness +func (r Rule) Validate() error { + if err := r.validateID(); err != nil { + return err + } + if err := r.validateStatus(); err != nil { + return err + } + if err := r.validateFilter(); err != nil { + return err + } + + if r.Priority < 0 && r.Status == Enabled { + return fmt.Errorf("Priority must be set for the rule") + } + + return nil +} + +// validateID - checks if ID is valid or not. +func (r Rule) validateID() error { + // cannot be longer than 255 characters + if len(r.ID) > 255 { + return fmt.Errorf("ID must be less than 255 characters") + } + return nil +} + +// validateStatus - checks if status is valid or not. 
+func (r Rule) validateStatus() error { + // Status can't be empty + if len(r.Status) == 0 { + return fmt.Errorf("status cannot be empty") + } + + // Status must be one of Enabled or Disabled + if r.Status != Enabled && r.Status != Disabled { + return fmt.Errorf("status must be set to either Enabled or Disabled") + } + return nil +} + +func (r Rule) validateFilter() error { + if err := r.Filter.Validate(); err != nil { + return err + } + return nil +} + +// Prefix - a rule can either have prefix under <filter></filter> or under +// <filter><and></and></filter>. This method returns the prefix from the +// location where it is available +func (r Rule) Prefix() string { + if r.Filter.Prefix != "" { + return r.Filter.Prefix + } + return r.Filter.And.Prefix +} + +// Tags - a rule can either have tag under <filter></filter> or under +// <filter><and></and></filter>. This method returns all the tags from the +// rule in the format tag1=value1&tag2=value2 +func (r Rule) Tags() string { + if len(r.Filter.And.Tags) != 0 { + var buf bytes.Buffer + for _, t := range r.Filter.And.Tags { + if buf.Len() > 0 { + buf.WriteString("&") + } + buf.WriteString(t.String()) + } + return buf.String() + } + return "" +} + +// Filter - a filter for a replication configuration Rule. +type Filter struct { + XMLName xml.Name `xml:"Filter" json:"-"` + Prefix string `json:"Prefix,omitempty"` + And And `xml:"And,omitempty" json:"And,omitempty"` + Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` +} + +// Validate - validates the filter element +func (f Filter) Validate() error { + // A Filter must have exactly one of Prefix, Tag, or And specified. + if !f.And.isEmpty() { + if f.Prefix != "" { + return errInvalidFilter + } + if !f.Tag.IsEmpty() { + return errInvalidFilter + } + } + if f.Prefix != "" { + if !f.Tag.IsEmpty() { + return errInvalidFilter + } + } + if !f.Tag.IsEmpty() { + if err := f.Tag.Validate(); err != nil { + return err + } + } + return nil +} + +// Tag - a tag for a replication configuration Rule filter. 
+type Tag struct { + XMLName xml.Name `json:"-"` + Key string `xml:"Key,omitempty" json:"Key,omitempty"` + Value string `xml:"Value,omitempty" json:"Value,omitempty"` +} + +func (tag Tag) String() string { + return tag.Key + "=" + tag.Value +} + +// IsEmpty returns whether this tag is empty or not. +func (tag Tag) IsEmpty() bool { + return tag.Key == "" +} + +// Validate checks this tag. +func (tag Tag) Validate() error { + if len(tag.Key) == 0 || utf8.RuneCountInString(tag.Key) > 128 { + return fmt.Errorf("Invalid Tag Key") + } + + if utf8.RuneCountInString(tag.Value) > 256 { + return fmt.Errorf("Invalid Tag Value") + } + return nil +} + +// Destination - destination in ReplicationConfiguration. +type Destination struct { + XMLName xml.Name `xml:"Destination" json:"-"` + Bucket string `xml:"Bucket" json:"Bucket"` + StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` +} + +// And - a tag to combine a prefix and multiple tags for replication configuration rule. 
+type And struct { + XMLName xml.Name `xml:"And,omitempty" json:"-"` + Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` + Tags []Tag `xml:"Tag,omitempty" json:"Tags,omitempty"` +} + +// isEmpty returns true if Tags field is null +func (a And) isEmpty() bool { + return len(a.Tags) == 0 && a.Prefix == "" +} + +// Status represents Enabled/Disabled status +type Status string + +// Supported status types +const ( + Enabled Status = "Enabled" + Disabled Status = "Disabled" +) + +// DeleteMarkerReplication - whether delete markers are replicated - https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html +type DeleteMarkerReplication struct { + Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default +} + +// IsEmpty returns true if DeleteMarkerReplication is not set +func (d DeleteMarkerReplication) IsEmpty() bool { + return len(d.Status) == 0 +} diff --git a/pkg/s3utils/utils.go b/pkg/s3utils/utils.go index adceb7f2a..65f939253 100644 --- a/pkg/s3utils/utils.go +++ b/pkg/s3utils/utils.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -47,8 +47,8 @@ func IsValidDomain(host string) bool { if host[len(host)-1:] == "_" || host[:1] == "_" { return false } - // host cannot start or end with a "." - if host[len(host)-1:] == "." || host[:1] == "." { + // host cannot start with a "." + if host[:1] == "." { return false } // All non alphanumeric characters are invalid. 
@@ -78,22 +78,28 @@ func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool { return false } // Return true for all other cases - return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL) + return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL) || IsAliyunOSSEndpoint(endpointURL) } // Refer for region styles - https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region // amazonS3HostHyphen - regular expression used to determine if an arg is s3 host in hyphenated style. -var amazonS3HostHyphen = regexp.MustCompile(`^s3-(.*?)\.amazonaws\.com$`) +var amazonS3HostHyphen = regexp.MustCompile(`^s3-(.*?)\.amazonaws\.com$`) // amazonS3HostDualStack - regular expression used to determine if an arg is s3 host dualstack. -var amazonS3HostDualStack = regexp.MustCompile(`^s3\.dualstack\.(.*?)\.amazonaws\.com$`) +var amazonS3HostDualStack = regexp.MustCompile(`^s3\.dualstack\.(.*?)\.amazonaws\.com$`) // amazonS3HostDot - regular expression used to determine if an arg is s3 host in . style. -var amazonS3HostDot = regexp.MustCompile(`^s3\.(.*?)\.amazonaws\.com$`) +var amazonS3HostDot = regexp.MustCompile(`^s3\.(.*?)\.amazonaws\.com$`) // amazonS3ChinaHost - regular expression used to determine if the arg is s3 china host. -var amazonS3ChinaHost = regexp.MustCompile(`^s3\.(cn.*?)\.amazonaws\.com\.cn$`) +var amazonS3ChinaHost = regexp.MustCompile(`^s3\.(cn.*?)\.amazonaws\.com\.cn$`) + +// Regular expression used to determine if the arg is elb host. +var elbAmazonRegex = regexp.MustCompile(`elb(.*?)\.amazonaws\.com$`) + +// Regular expression used to determine if the arg is elb host in china. +var elbAmazonCnRegex = regexp.MustCompile(`elb(.*?)\.amazonaws\.com\.cn$`) // GetRegionFromURL - returns a region from url host. 
func GetRegionFromURL(endpointURL url.URL) string { @@ -106,6 +112,10 @@ func GetRegionFromURL(endpointURL url.URL) string { if IsAmazonGovCloudEndpoint(endpointURL) { return "us-gov-west-1" } + // if elb's are used we cannot calculate which region it may be, just return empty. + if elbAmazonRegex.MatchString(endpointURL.Host) || elbAmazonCnRegex.MatchString(endpointURL.Host) { + return "" + } parts := amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host) if len(parts) > 1 { return parts[1] @@ -125,6 +135,11 @@ func GetRegionFromURL(endpointURL url.URL) string { return "" } +// IsAliyunOSSEndpoint - Match if it is exactly Aliyun OSS endpoint. +func IsAliyunOSSEndpoint(endpointURL url.URL) bool { + return strings.HasSuffix(endpointURL.Host, "aliyuncs.com") +} + // IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint. func IsAmazonEndpoint(endpointURL url.URL) bool { if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" { @@ -219,6 +234,44 @@ func QueryEncode(v url.Values) string { return buf.String() } +// TagDecode - decodes canonical tag into map of key and value. +func TagDecode(ctag string) map[string]string { + if ctag == "" { + return map[string]string{} + } + tags := strings.Split(ctag, "&") + tagMap := make(map[string]string, len(tags)) + var err error + for _, tag := range tags { + kvs := strings.SplitN(tag, "=", 2) + if len(kvs) == 0 { + return map[string]string{} + } + if len(kvs) == 1 { + return map[string]string{} + } + tagMap[kvs[0]], err = url.PathUnescape(kvs[1]) + if err != nil { + continue + } + } + return tagMap +} + +// TagEncode - encodes tag values in their URL encoded form. 
In +// addition to the percent encoding performed by urlEncodePath() used +// here, it also percent encodes '/' (forward slash) +func TagEncode(tags map[string]string) string { + if tags == nil { + return "" + } + values := url.Values{} + for k, v := range tags { + values[k] = []string{v} + } + return QueryEncode(values) +} + // if object matches reserved string, no need to encode them var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") @@ -274,15 +327,15 @@ func checkBucketNameCommon(bucketName string, strict bool) (err error) { return errors.New("Bucket name cannot be empty") } if len(bucketName) < 3 { - return errors.New("Bucket name cannot be smaller than 3 characters") + return errors.New("Bucket name cannot be shorter than 3 characters") } if len(bucketName) > 63 { - return errors.New("Bucket name cannot be greater than 63 characters") + return errors.New("Bucket name cannot be longer than 63 characters") } if ipAddress.MatchString(bucketName) { return errors.New("Bucket name cannot be an ip address") } - if strings.Contains(bucketName, "..") { + if strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") { return errors.New("Bucket name contains invalid characters") } if strict { @@ -313,7 +366,7 @@ func CheckValidBucketNameStrict(bucketName string) (err error) { // - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html func CheckValidObjectNamePrefix(objectName string) error { if len(objectName) > 1024 { - return errors.New("Object name cannot be greater than 1024 characters") + return errors.New("Object name cannot be longer than 1024 characters") } if !utf8.ValidString(objectName) { return errors.New("Object name with non UTF-8 strings are not supported") diff --git a/pkg/s3utils/utils_test.go b/pkg/s3utils/utils_test.go index 55eaaeacf..125456031 100644 --- a/pkg/s3utils/utils_test.go +++ b/pkg/s3utils/utils_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 
Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ package s3utils import ( "errors" "net/url" + "reflect" "testing" ) @@ -73,6 +74,23 @@ func TestGetRegionFromURL(t *testing.T) { u: url.URL{Host: "s3-external-1.amazonaws.com"}, expectedRegion: "", }, + { + u: url.URL{ + Host: "s3.kubernetesfrontendlb-caf78da2b1f7516c.elb.us-west-2.amazonaws.com", + }, + expectedRegion: "", + }, + { + u: url.URL{ + Host: "s3.kubernetesfrontendlb-caf78da2b1f7516c.elb.amazonaws.com", + }, + expectedRegion: "", + }, + { + u: url.URL{ + Host: "s3.kubernetesfrontendlb-caf78da2b1f7516c.elb.amazonaws.com.cn", + }, + }, } for i, testCase := range testCases { @@ -99,6 +117,7 @@ func TestIsValidDomain(t *testing.T) { {"s3.amz.test.com", true}, {"s3.%%", false}, {"localhost", true}, + {"localhost.", true}, // http://www.dns-sd.org/trailingdotsindomainnames.html {"-localhost", false}, {"", false}, {"\n \t", false}, @@ -294,6 +313,81 @@ func TestQueryEncode(t *testing.T) { } } +// Tests tag decode to map +func TestTagDecode(t *testing.T) { + testCases := []struct { + // canonical input + canonicalInput string + + // Expected result. 
+ resultMap map[string]string + }{ + {"k=thisisthe%25url", map[string]string{"k": "thisisthe%url"}}, + {"k=%E6%9C%AC%E8%AA%9E", map[string]string{"k": "本語"}}, + {"k=%E6%9C%AC%E8%AA%9E.1", map[string]string{"k": "本語.1"}}, + {"k=%3E123", map[string]string{"k": ">123"}}, + {"k=myurl%23link", map[string]string{"k": "myurl#link"}}, + {"k=space%20in%20url", map[string]string{"k": "space in url"}}, + {"k=url%2Bpath", map[string]string{"k": "url+path"}}, + {"k=url%2Fpath", map[string]string{"k": "url/path"}}, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run("", func(t *testing.T) { + gotResult := TagDecode(testCase.canonicalInput) + if !reflect.DeepEqual(testCase.resultMap, gotResult) { + t.Errorf("Expected %s, got %s", testCase.resultMap, gotResult) + } + }) + } +} + +// Tests tag encode function for user tags. +func TestTagEncode(t *testing.T) { + testCases := []struct { + // Input. + inputMap map[string]string + // Expected result. + result string + }{ + {map[string]string{ + "k": "thisisthe%url", + }, "k=thisisthe%25url"}, + {map[string]string{ + "k": "本語", + }, "k=%E6%9C%AC%E8%AA%9E"}, + {map[string]string{ + "k": "本語.1", + }, "k=%E6%9C%AC%E8%AA%9E.1"}, + {map[string]string{ + "k": ">123", + }, "k=%3E123"}, + {map[string]string{ + "k": "myurl#link", + }, "k=myurl%23link"}, + {map[string]string{ + "k": "space in url", + }, "k=space%20in%20url"}, + {map[string]string{ + "k": "url+path", + }, "k=url%2Bpath"}, + {map[string]string{ + "k": "url/path", + }, "k=url%2Fpath"}, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run("", func(t *testing.T) { + gotResult := TagEncode(testCase.inputMap) + if testCase.result != gotResult { + t.Errorf("Expected %s, got %s", testCase.result, gotResult) + } + }) + } +} + // Tests validate the URL path encoder. 
func TestEncodePath(t *testing.T) { testCases := []struct { @@ -309,6 +403,7 @@ func TestEncodePath(t *testing.T) { {"myurl#link", "myurl%23link"}, {"space in url", "space%20in%20url"}, {"url+path", "url%2Bpath"}, + {"url/path", "url/path"}, } for i, testCase := range testCases { @@ -332,9 +427,11 @@ func TestIsValidBucketName(t *testing.T) { {".mybucket", errors.New("Bucket name contains invalid characters"), false}, {"$mybucket", errors.New("Bucket name contains invalid characters"), false}, {"mybucket-", errors.New("Bucket name contains invalid characters"), false}, - {"my", errors.New("Bucket name cannot be smaller than 3 characters"), false}, + {"my", errors.New("Bucket name cannot be shorter than 3 characters"), false}, {"", errors.New("Bucket name cannot be empty"), false}, {"my..bucket", errors.New("Bucket name contains invalid characters"), false}, + {"my.-bucket", errors.New("Bucket name contains invalid characters"), false}, + {"my-.bucket", errors.New("Bucket name contains invalid characters"), false}, {"192.168.1.168", errors.New("Bucket name cannot be an ip address"), false}, {":bucketname", errors.New("Bucket name contains invalid characters"), false}, {"_bucketName", errors.New("Bucket name contains invalid characters"), false}, @@ -378,9 +475,11 @@ func TestIsValidBucketNameStrict(t *testing.T) { {".mybucket", errors.New("Bucket name contains invalid characters"), false}, {"$mybucket", errors.New("Bucket name contains invalid characters"), false}, {"mybucket-", errors.New("Bucket name contains invalid characters"), false}, - {"my", errors.New("Bucket name cannot be smaller than 3 characters"), false}, + {"my", errors.New("Bucket name cannot be shorter than 3 characters"), false}, {"", errors.New("Bucket name cannot be empty"), false}, {"my..bucket", errors.New("Bucket name contains invalid characters"), false}, + {"my.-bucket", errors.New("Bucket name contains invalid characters"), false}, + {"my-.bucket", errors.New("Bucket name contains invalid 
characters"), false}, {"192.168.1.168", errors.New("Bucket name cannot be an ip address"), false}, {"Mybucket", errors.New("Bucket name contains invalid characters"), false}, {"my.bucket.com", nil, true}, diff --git a/pkg/set/stringset.go b/pkg/set/stringset.go index efd02629b..c35e58e1a 100644 --- a/pkg/set/stringset.go +++ b/pkg/set/stringset.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,14 +18,17 @@ package set import ( - "encoding/json" "fmt" "sort" + + jsoniter "github.com/json-iterator/go" ) // StringSet - uses map as set of strings. type StringSet map[string]struct{} +var json = jsoniter.ConfigCompatibleWithStandardLibrary + // ToSlice - returns StringSet as string slice. func (set StringSet) ToSlice() []string { keys := make([]string, 0, len(set)) diff --git a/pkg/set/stringset_test.go b/pkg/set/stringset_test.go index d7e6aa799..12aa6f90a 100644 --- a/pkg/set/stringset_test.go +++ b/pkg/set/stringset_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -127,7 +127,7 @@ func TestStringSetFuncMatch(t *testing.T) { }{ // Test to check match function doing case insensive compare. {func(setValue string, compareValue string) bool { - return strings.ToUpper(setValue) == strings.ToUpper(compareValue) + return strings.EqualFold(setValue, compareValue) }, "Bar", `[bar]`}, // Test to check match function doing prefix check. 
{func(setValue string, compareValue string) bool { diff --git a/pkg/s3signer/request-signature-streaming.go b/pkg/signer/request-signature-streaming.go similarity index 95% rename from pkg/s3signer/request-signature-streaming.go rename to pkg/signer/request-signature-streaming.go index 156a6d63a..7b2ca91d1 100644 --- a/pkg/s3signer/request-signature-streaming.go +++ b/pkg/signer/request-signature-streaming.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ * limitations under the License. */ -package s3signer +package signer import ( "bytes" @@ -82,7 +82,7 @@ func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData [ stringToSignParts := []string{ streamingPayloadHdr, t.Format(iso8601DateFormat), - getScope(region, t), + getScope(region, t, ServiceTypeS3), previousSig, emptySHA256, hex.EncodeToString(sum256(chunkData)), @@ -118,19 +118,19 @@ func buildChunkSignature(chunkData []byte, reqTime time.Time, region, chunkStringToSign := buildChunkStringToSign(reqTime, region, previousSignature, chunkData) - signingKey := getSigningKey(secretAccessKey, region, reqTime) + signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) return getSignature(signingKey, chunkStringToSign) } // getSeedSignature - returns the seed signature for a given request. func (s *StreamingReader) setSeedSignature(req *http.Request) { // Get canonical request - canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders) + canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders, getHashedPayload(*req)) // Get string to sign from canonical request. 
- stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest) + stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest, ServiceTypeS3) - signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime) + signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime, ServiceTypeS3) // Calculate signature. s.seedSignature = getSignature(signingKey, stringToSign) @@ -185,7 +185,7 @@ func (s *StreamingReader) signChunk(chunkLen int) { // setStreamingAuthHeader - builds and sets authorization header value // for streaming signature. func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { - credential := GetCredential(s.accessKeyID, s.region, s.reqTime) + credential := GetCredential(s.accessKeyID, s.region, s.reqTime, ServiceTypeS3) authParts := []string{ signV4Algorithm + " Credential=" + credential, "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders), @@ -285,7 +285,7 @@ func (s *StreamingReader) Read(buf []byte) (int, error) { // bytes read from baseReader different than // content length provided. if s.bytesRead != s.contentLen { - return 0, io.ErrUnexpectedEOF + return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead) } // Sign the chunk and write it to s.buf. diff --git a/pkg/s3signer/request-signature-streaming_test.go b/pkg/signer/request-signature-streaming_test.go similarity index 91% rename from pkg/s3signer/request-signature-streaming_test.go rename to pkg/signer/request-signature-streaming_test.go index 297ab97be..43b614ee8 100644 --- a/pkg/s3signer/request-signature-streaming_test.go +++ b/pkg/signer/request-signature-streaming_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,11 +15,12 @@ * limitations under the License. */ -package s3signer +package signer import ( "bytes" "io/ioutil" + "net/http" "testing" "time" ) @@ -31,7 +32,7 @@ func TestGetSeedSignature(t *testing.T) { data := bytes.Repeat([]byte("a"), dataLen) body := ioutil.NopCloser(bytes.NewReader(data)) - req := NewRequest("PUT", "/examplebucket/chunkObject.txt", body) + req := NewRequest(http.MethodPut, "/examplebucket/chunkObject.txt", body) req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") req.Host = "s3.amazonaws.com" @@ -67,7 +68,7 @@ func TestSetStreamingAuthorization(t *testing.T) { secretAccessKeyID := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" accessKeyID := "AKIAIOSFODNN7EXAMPLE" - req := NewRequest("PUT", "/examplebucket/chunkObject.txt", nil) + req := NewRequest(http.MethodPut, "/examplebucket/chunkObject.txt", nil) req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") req.Host = "" req.URL.Host = "s3.amazonaws.com" @@ -91,7 +92,7 @@ func TestStreamingReader(t *testing.T) { accessKeyID := "AKIAIOSFODNN7EXAMPLE" dataLen := int64(65 * 1024) - req := NewRequest("PUT", "/examplebucket/chunkObject.txt", nil) + req := NewRequest(http.MethodPut, "/examplebucket/chunkObject.txt", nil) req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") req.ContentLength = 65 * 1024 req.Host = "" diff --git a/pkg/s3signer/request-signature-v2.go b/pkg/signer/request-signature-v2.go similarity index 98% rename from pkg/s3signer/request-signature-v2.go rename to pkg/signer/request-signature-v2.go index b4070938e..71821a26a 100644 --- a/pkg/s3signer/request-signature-v2.go +++ b/pkg/signer/request-signature-v2.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ * limitations under the License. */ -package s3signer +package signer import ( "bytes" @@ -30,7 +30,7 @@ import ( "strings" "time" - "github.com/minio/minio-go/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/s3utils" ) // Signature and API related constants. @@ -262,6 +262,7 @@ var resourceList = []string{ "notification", "partNumber", "policy", + "replication", "requestPayment", "response-cache-control", "response-content-disposition", diff --git a/pkg/s3signer/request-signature-v2_test.go b/pkg/signer/request-signature-v2_test.go similarity index 90% rename from pkg/s3signer/request-signature-v2_test.go rename to pkg/signer/request-signature-v2_test.go index 042b6e65c..c73c779b5 100644 --- a/pkg/s3signer/request-signature-v2_test.go +++ b/pkg/signer/request-signature-v2_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ * limitations under the License. */ -package s3signer +package signer import ( "sort" diff --git a/pkg/s3signer/request-signature-v4.go b/pkg/signer/request-signature-v4.go similarity index 77% rename from pkg/s3signer/request-signature-v4.go rename to pkg/signer/request-signature-v4.go index d0ddbfa0e..67572b20d 100644 --- a/pkg/s3signer/request-signature-v4.go +++ b/pkg/signer/request-signature-v4.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ * limitations under the License. */ -package s3signer +package signer import ( "bytes" @@ -26,7 +26,7 @@ import ( "strings" "time" - "github.com/minio/minio-go/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/s3utils" ) // Signature and API related constants. @@ -36,6 +36,12 @@ const ( yyyymmdd = "20060102" ) +// Different service types +const ( + ServiceTypeS3 = "s3" + ServiceTypeSTS = "sts" +) + /// /// Excerpts from @lsegal - /// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. @@ -47,42 +53,21 @@ const ( /// by other agents) or when customers pass requests through /// proxies, which may modify the user-agent. /// -/// Content-Length: -/// -/// This is ignored from signing because generating a pre-signed -/// URL should not provide a content-length constraint, -/// specifically when vending a S3 pre-signed PUT URL. The -/// corollary to this is that when sending regular requests -/// (non-pre-signed), the signature contains a checksum of the -/// body, which implicitly validates the payload length (since -/// changing the number of bytes would change the checksum) -/// and therefore this header is not valuable in the signature. -/// -/// Content-Type: -/// -/// Signing this header causes quite a number of problems in -/// browser environments, where browsers like to modify and -/// normalize the content-type header in different ways. There is -/// more information on this in https://goo.gl/2E9gyy. Avoiding -/// this field simplifies logic and reduces the possibility of -/// future bugs. /// /// Authorization: /// /// Is skipped for obvious reasons /// var v4IgnoredHeaders = map[string]bool{ - "Authorization": true, - "Content-Type": true, - "Content-Length": true, - "User-Agent": true, + "Authorization": true, + "User-Agent": true, } // getSigningKey hmac seed to calculate final signature. 
-func getSigningKey(secret, loc string, t time.Time) []byte { +func getSigningKey(secret, loc string, t time.Time, serviceType string) []byte { date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) location := sumHMAC(date, []byte(loc)) - service := sumHMAC(location, []byte("s3")) + service := sumHMAC(location, []byte(serviceType)) signingKey := sumHMAC(service, []byte("aws4_request")) return signingKey } @@ -94,19 +79,19 @@ func getSignature(signingKey []byte, stringToSign string) string { // getScope generate a string of a specific date, an AWS region, and a // service. -func getScope(location string, t time.Time) string { +func getScope(location string, t time.Time, serviceType string) string { scope := strings.Join([]string{ t.Format(yyyymmdd), location, - "s3", + serviceType, "aws4_request", }, "/") return scope } // GetCredential generate a credential string. -func GetCredential(accessKeyID, location string, t time.Time) string { - scope := getScope(location, t) +func GetCredential(accessKeyID, location string, t time.Time, serviceType string) string { + scope := getScope(location, t, serviceType) return accessKeyID + "/" + scope } @@ -184,7 +169,7 @@ func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string { // \n // \n // -func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool) string { +func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string { req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) canonicalRequest := strings.Join([]string{ req.Method, @@ -192,15 +177,15 @@ func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool) strin req.URL.RawQuery, getCanonicalHeaders(req, ignoredHeaders), getSignedHeaders(req, ignoredHeaders), - getHashedPayload(req), + hashedPayload, }, "\n") return canonicalRequest } // getStringToSign a string based on selected query values. 
-func getStringToSignV4(t time.Time, location, canonicalRequest string) string { +func getStringToSignV4(t time.Time, location, canonicalRequest, serviceType string) string { stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n" - stringToSign = stringToSign + getScope(location, t) + "\n" + stringToSign = stringToSign + getScope(location, t, serviceType) + "\n" stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest))) return stringToSign } @@ -217,7 +202,7 @@ func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, loc t := time.Now().UTC() // Get credential string. - credential := GetCredential(accessKeyID, location, t) + credential := GetCredential(accessKeyID, location, t, ServiceTypeS3) // Get all signed headers. signedHeaders := getSignedHeaders(req, v4IgnoredHeaders) @@ -236,13 +221,13 @@ func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, loc req.URL.RawQuery = query.Encode() // Get canonical request. - canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders) + canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, getHashedPayload(req)) // Get string to sign from canonical request. - stringToSign := getStringToSignV4(t, location, canonicalRequest) + stringToSign := getStringToSignV4(t, location, canonicalRequest, ServiceTypeS3) // Gext hmac signing key. - signingKey := getSigningKey(secretAccessKey, location, t) + signingKey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3) // Calculate signature. signature := getSignature(signingKey, stringToSign) @@ -257,15 +242,19 @@ func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, loc // requests. func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string { // Get signining key. 
- signingkey := getSigningKey(secretAccessKey, location, t) + signingkey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3) // Calculate signature. signature := getSignature(signingkey, policyBase64) return signature } -// SignV4 sign the request before Do(), in accordance with -// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. -func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request { +// SignV4STS - signature v4 for STS request. +func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request { + return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS) +} + +// Internal function called for different service types. +func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string) *http.Request { // Signature calculation is not needed for anonymous credentials. if accessKeyID == "" || secretAccessKey == "" { return &req @@ -282,17 +271,25 @@ func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, locati req.Header.Set("X-Amz-Security-Token", sessionToken) } + hashedPayload := getHashedPayload(req) + if serviceType == ServiceTypeSTS { + // Content sha256 header is not sent with the request + // but it is expected to have sha256 of payload for signature + // in STS service type request. + req.Header.Del("X-Amz-Content-Sha256") + } + // Get canonical request. - canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders) + canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, hashedPayload) // Get string to sign from canonical request. - stringToSign := getStringToSignV4(t, location, canonicalRequest) + stringToSign := getStringToSignV4(t, location, canonicalRequest, serviceType) // Get hmac signing key. 
- signingKey := getSigningKey(secretAccessKey, location, t) + signingKey := getSigningKey(secretAccessKey, location, t, serviceType) // Get credential string. - credential := GetCredential(accessKeyID, location, t) + credential := GetCredential(accessKeyID, location, t, serviceType) // Get all signed headers. signedHeaders := getSignedHeaders(req, v4IgnoredHeaders) @@ -313,3 +310,9 @@ func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, locati return &req } + +// SignV4 sign the request before Do(), in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. +func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request { + return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3) +} diff --git a/pkg/s3signer/request-signature-v4_test.go b/pkg/signer/request-signature-v4_test.go similarity index 90% rename from pkg/s3signer/request-signature-v4_test.go rename to pkg/signer/request-signature-v4_test.go index a109a4f2a..c67a47230 100644 --- a/pkg/s3signer/request-signature-v4_test.go +++ b/pkg/signer/request-signature-v4_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ * limitations under the License. */ -package s3signer +package signer import ( "io" @@ -38,7 +38,7 @@ func TestRequestHost(t *testing.T) { func buildRequest(serviceName, region, body string) (*http.Request, io.ReadSeeker) { endpoint := "https://" + serviceName + "." 
+ region + ".amazonaws.com" reader := strings.NewReader(body) - req, _ := http.NewRequest("POST", endpoint, reader) + req, _ := http.NewRequest(http.MethodPost, endpoint, reader) req.URL.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()" req.Header.Add("X-Amz-Target", "prefix.Operation") req.Header.Add("Content-Type", "application/x-amz-json-1.0") diff --git a/pkg/s3signer/request-signature_test.go b/pkg/signer/request-signature_test.go similarity index 91% rename from pkg/s3signer/request-signature_test.go rename to pkg/signer/request-signature_test.go index 75115d19c..3709160a3 100644 --- a/pkg/s3signer/request-signature_test.go +++ b/pkg/signer/request-signature_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ * limitations under the License. */ -package s3signer +package signer import ( "net/http" @@ -25,7 +25,7 @@ import ( // Tests signature calculation. 
func TestSignatureCalculationV4(t *testing.T) { - req, err := http.NewRequest("GET", "https://s3.amazonaws.com", nil) + req, err := http.NewRequest(http.MethodGet, "https://s3.amazonaws.com", nil) if err != nil { t.Fatal("Error:", err) } @@ -61,7 +61,7 @@ func TestSignatureCalculationV2(t *testing.T) { } for i, testCase := range testCases { - req, err := http.NewRequest("GET", testCase.endpointURL, nil) + req, err := http.NewRequest(http.MethodGet, testCase.endpointURL, nil) if err != nil { t.Fatalf("Test %d, Error: %v", i+1, err) } diff --git a/pkg/s3signer/test-utils_test.go b/pkg/signer/test-utils_test.go similarity index 94% rename from pkg/s3signer/test-utils_test.go rename to pkg/signer/test-utils_test.go index cf96d66c8..c39ab83bc 100644 --- a/pkg/s3signer/test-utils_test.go +++ b/pkg/signer/test-utils_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ * limitations under the License. */ -package s3signer +package signer import ( "bufio" @@ -43,7 +43,7 @@ import ( // // The Request.Proto is always HTTP/1.1. // -// An empty method means "GET". +// An empty method means http.MethodGet. // // The provided body may be nil. If the body is of type *bytes.Reader, // *strings.Reader, or *bytes.Buffer, the Request.ContentLength is @@ -53,7 +53,7 @@ import ( // panic is acceptable. 
func NewRequest(method, target string, body io.Reader) *http.Request { if method == "" { - method = "GET" + method = http.MethodGet } req, err := http.ReadRequest(bufio.NewReader(strings.NewReader(method + " " + target + " HTTP/1.0\r\n\r\n"))) if err != nil { diff --git a/pkg/s3signer/utils.go b/pkg/signer/utils.go similarity index 92% rename from pkg/s3signer/utils.go rename to pkg/signer/utils.go index c307c47ee..2192a3693 100644 --- a/pkg/s3signer/utils.go +++ b/pkg/signer/utils.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,13 +15,14 @@ * limitations under the License. */ -package s3signer +package signer import ( "crypto/hmac" - "crypto/sha256" "net/http" "strings" + + "github.com/minio/sha256-simd" ) // unsignedPayload - value to be set to X-Amz-Content-Sha256 header when diff --git a/pkg/s3signer/utils_test.go b/pkg/signer/utils_test.go similarity index 96% rename from pkg/s3signer/utils_test.go rename to pkg/signer/utils_test.go index a5de14d6e..745699ee3 100644 --- a/pkg/s3signer/utils_test.go +++ b/pkg/signer/utils_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ * limitations under the License. 
 */ -package s3signer +package signer import ( "fmt" diff --git a/pkg/sse/sse.go b/pkg/sse/sse.go new file mode 100644 index 000000000..b5fb9565a --- /dev/null +++ b/pkg/sse/sse.go @@ -0,0 +1,66 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sse + +import "encoding/xml" + +// ApplySSEByDefault defines default encryption configuration, KMS or SSE. To activate +// KMS, SSEAlgorithm needs to be set to "aws:kms" +// MinIO currently does not support KMS. 
+type ApplySSEByDefault struct { + KmsMasterKeyID string `xml:"KMSMasterKeyID,omitempty"` + SSEAlgorithm string `xml:"SSEAlgorithm"` +} + +// Rule layer encapsulates default encryption configuration +type Rule struct { + Apply ApplySSEByDefault `xml:"ApplyServerSideEncryptionByDefault"` +} + +// Configuration is the default encryption configuration structure +type Configuration struct { + XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"` + Rules []Rule `xml:"Rule"` +} + +// NewConfigurationSSES3 initializes a new SSE-S3 configuration +func NewConfigurationSSES3() *Configuration { + return &Configuration{ + Rules: []Rule{ + { + Apply: ApplySSEByDefault{ + SSEAlgorithm: "AES256", + }, + }, + }, + } +} + +// NewConfigurationSSEKMS initializes a new SSE-KMS configuration +func NewConfigurationSSEKMS(kmsMasterKey string) *Configuration { + return &Configuration{ + Rules: []Rule{ + { + Apply: ApplySSEByDefault{ + KmsMasterKeyID: kmsMasterKey, + SSEAlgorithm: "aws:kms", + }, + }, + }, + } +} diff --git a/pkg/tags/tags.go b/pkg/tags/tags.go new file mode 100644 index 000000000..65ba38b10 --- /dev/null +++ b/pkg/tags/tags.go @@ -0,0 +1,342 @@ +/* + * MinIO Cloud Storage, (C) 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tags + +import ( + "encoding/xml" + "io" + "net/url" + "strings" + "unicode/utf8" +) + +// Error contains tag specific error. 
+type Error interface { + error + Code() string +} + +type errTag struct { + code string + message string +} + +// Code contains error code. +func (err errTag) Code() string { + return err.code +} + +// Error contains error message. +func (err errTag) Error() string { + return err.message +} + +var ( + errTooManyObjectTags = &errTag{"BadRequest", "Tags cannot be more than 10"} + errTooManyTags = &errTag{"BadRequest", "Tags cannot be more than 50"} + errInvalidTagKey = &errTag{"InvalidTag", "The TagKey you have provided is invalid"} + errInvalidTagValue = &errTag{"InvalidTag", "The TagValue you have provided is invalid"} + errDuplicateTagKey = &errTag{"InvalidTag", "Cannot provide multiple Tags with the same key"} +) + +// Tag comes with limitation as per +// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html and +// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions +const ( + maxKeyLength = 128 + maxValueLength = 256 + maxObjectTagCount = 10 + maxTagCount = 50 +) + +func checkKey(key string) error { + if len(key) == 0 || utf8.RuneCountInString(key) > maxKeyLength || strings.Contains(key, "&") { + return errInvalidTagKey + } + + return nil +} + +func checkValue(value string) error { + if utf8.RuneCountInString(value) > maxValueLength || strings.Contains(value, "&") { + return errInvalidTagValue + } + + return nil +} + +// Tag denotes key and value. +type Tag struct { + Key string `xml:"Key"` + Value string `xml:"Value"` +} + +func (tag Tag) String() string { + return tag.Key + "=" + tag.Value +} + +// IsEmpty returns whether this tag is empty or not. +func (tag Tag) IsEmpty() bool { + return tag.Key == "" +} + +// Validate checks this tag. +func (tag Tag) Validate() error { + if err := checkKey(tag.Key); err != nil { + return err + } + + return checkValue(tag.Value) +} + +// MarshalXML encodes to XML data. 
+func (tag Tag) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := tag.Validate(); err != nil { + return err + } + + type subTag Tag // to avoid recursively calling MarshalXML() + return e.EncodeElement(subTag(tag), start) +} + +// UnmarshalXML decodes XML data to tag. +func (tag *Tag) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type subTag Tag // to avoid recursively calling UnmarshalXML() + var st subTag + if err := d.DecodeElement(&st, &start); err != nil { + return err + } + + if err := Tag(st).Validate(); err != nil { + return err + } + + *tag = Tag(st) + return nil +} + +// tagSet represents list of unique tags. +type tagSet struct { + tagMap map[string]string + isObject bool +} + +func (tags tagSet) String() string { + s := []string{} + for key, value := range tags.tagMap { + s = append(s, key+"="+value) + } + + return strings.Join(s, "&") +} + +func (tags *tagSet) remove(key string) { + delete(tags.tagMap, key) +} + +func (tags *tagSet) set(key, value string, failOnExist bool) error { + if failOnExist { + if _, found := tags.tagMap[key]; found { + return errDuplicateTagKey + } + } + + if err := checkKey(key); err != nil { + return err + } + + if err := checkValue(value); err != nil { + return err + } + + if tags.isObject { + if len(tags.tagMap) == maxObjectTagCount { + return errTooManyObjectTags + } + } else if len(tags.tagMap) == maxTagCount { + return errTooManyTags + } + + tags.tagMap[key] = value + return nil +} + +func (tags tagSet) toMap() map[string]string { + m := make(map[string]string) + for key, value := range tags.tagMap { + m[key] = value + } + return m +} + +// MarshalXML encodes to XML data. 
+func (tags tagSet) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + tagList := struct { + Tags []Tag `xml:"Tag"` + }{} + + for key, value := range tags.tagMap { + tagList.Tags = append(tagList.Tags, Tag{key, value}) + } + + return e.EncodeElement(tagList, start) +} + +// UnmarshalXML decodes XML data to tag list. +func (tags *tagSet) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + tagList := struct { + Tags []Tag `xml:"Tag"` + }{} + + if err := d.DecodeElement(&tagList, &start); err != nil { + return err + } + + if tags.isObject { + if len(tagList.Tags) > maxObjectTagCount { + return errTooManyObjectTags + } + } else if len(tagList.Tags) > maxTagCount { + return errTooManyTags + } + + m := map[string]string{} + for _, tag := range tagList.Tags { + if _, found := m[tag.Key]; found { + return errDuplicateTagKey + } + + m[tag.Key] = tag.Value + } + + tags.tagMap = m + return nil +} + +type tagging struct { + XMLName xml.Name `xml:"Tagging"` + TagSet *tagSet `xml:"TagSet"` +} + +// Tags is list of tags of XML request/response as per +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html#API_GetBucketTagging_RequestBody +type Tags tagging + +func (tags Tags) String() string { + return tags.TagSet.String() +} + +// Remove removes a tag by its key. +func (tags *Tags) Remove(key string) { + tags.TagSet.remove(key) +} + +// Set sets new tag. +func (tags *Tags) Set(key, value string) error { + return tags.TagSet.set(key, value, false) +} + +// ToMap returns copy of tags. +func (tags Tags) ToMap() map[string]string { + return tags.TagSet.toMap() +} + +// MapToObjectTags converts an input map of key and value into +// *Tags data structure with validation. +func MapToObjectTags(tagMap map[string]string) (*Tags, error) { + return NewTags(tagMap, true) +} + +// MapToBucketTags converts an input map of key and value into +// *Tags data structure with validation. 
+func MapToBucketTags(tagMap map[string]string) (*Tags, error) { + return NewTags(tagMap, false) +} + +// NewTags creates Tags from tagMap, If isObject is set, it validates for object tags. +func NewTags(tagMap map[string]string, isObject bool) (*Tags, error) { + tagging := &Tags{ + TagSet: &tagSet{ + tagMap: make(map[string]string), + isObject: isObject, + }, + } + + for key, value := range tagMap { + if err := tagging.TagSet.set(key, value, true); err != nil { + return nil, err + } + } + + return tagging, nil +} + +func unmarshalXML(reader io.Reader, isObject bool) (*Tags, error) { + tagging := &Tags{ + TagSet: &tagSet{ + tagMap: make(map[string]string), + isObject: isObject, + }, + } + + if err := xml.NewDecoder(reader).Decode(tagging); err != nil { + return nil, err + } + + return tagging, nil +} + +// ParseBucketXML decodes XML data of tags in reader specified in +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html#API_PutBucketTagging_RequestSyntax. +func ParseBucketXML(reader io.Reader) (*Tags, error) { + return unmarshalXML(reader, false) +} + +// ParseObjectXML decodes XML data of tags in reader specified in +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html#API_PutObjectTagging_RequestSyntax +func ParseObjectXML(reader io.Reader) (*Tags, error) { + return unmarshalXML(reader, true) +} + +// Parse decodes HTTP query formatted string into tags which is limited by isObject. +// A query formatted string is like "key1=value1&key2=value2". +func Parse(s string, isObject bool) (*Tags, error) { + values, err := url.ParseQuery(s) + if err != nil { + return nil, err + } + + tagging := &Tags{ + TagSet: &tagSet{ + tagMap: make(map[string]string), + isObject: isObject, + }, + } + + for key := range values { + if err := tagging.TagSet.set(key, values.Get(key), true); err != nil { + return nil, err + } + } + + return tagging, nil +} + +// ParseObjectTags decodes HTTP query formatted string into tags. 
A query formatted string is like "key1=value1&key2=value2". +func ParseObjectTags(s string) (*Tags, error) { + return Parse(s, true) +} diff --git a/post-policy.go b/post-policy.go index c285fdefd..d489d981a 100644 --- a/post-policy.go +++ b/post-policy.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -73,7 +73,7 @@ func NewPostPolicy() *PostPolicy { // SetExpires - Sets expiration time for the new policy. func (p *PostPolicy) SetExpires(t time.Time) error { if t.IsZero() { - return ErrInvalidArgument("No expiry time set.") + return errInvalidArgument("No expiry time set.") } p.expiration = t return nil @@ -82,7 +82,7 @@ func (p *PostPolicy) SetExpires(t time.Time) error { // SetKey - Sets an object name for the policy based upload. func (p *PostPolicy) SetKey(key string) error { if strings.TrimSpace(key) == "" || key == "" { - return ErrInvalidArgument("Object name is empty.") + return errInvalidArgument("Object name is empty.") } policyCond := policyCondition{ matchType: "eq", @@ -100,7 +100,7 @@ func (p *PostPolicy) SetKey(key string) error { // can start with. func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" { - return ErrInvalidArgument("Object prefix is empty.") + return errInvalidArgument("Object prefix is empty.") } policyCond := policyCondition{ matchType: "starts-with", @@ -117,7 +117,7 @@ func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { // SetBucket - Sets bucket at which objects will be uploaded to. 
func (p *PostPolicy) SetBucket(bucketName string) error { if strings.TrimSpace(bucketName) == "" || bucketName == "" { - return ErrInvalidArgument("Bucket name is empty.") + return errInvalidArgument("Bucket name is empty.") } policyCond := policyCondition{ matchType: "eq", @@ -131,11 +131,32 @@ func (p *PostPolicy) SetBucket(bucketName string) error { return nil } +// SetCondition - Sets condition for credentials, date and algorithm +func (p *PostPolicy) SetCondition(matchType, condition, value string) error { + if strings.TrimSpace(value) == "" || value == "" { + return errInvalidArgument("No value specified for condition") + } + + policyCond := policyCondition{ + matchType: matchType, + condition: "$" + condition, + value: value, + } + if condition == "X-Amz-Credential" || condition == "X-Amz-Date" || condition == "X-Amz-Algorithm" { + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[condition] = value + return nil + } + return errInvalidArgument("Invalid condition in policy") +} + // SetContentType - Sets content-type of the object for this policy // based upload. func (p *PostPolicy) SetContentType(contentType string) error { if strings.TrimSpace(contentType) == "" || contentType == "" { - return ErrInvalidArgument("No content type specified.") + return errInvalidArgument("No content type specified.") } policyCond := policyCondition{ matchType: "eq", @@ -153,24 +174,42 @@ func (p *PostPolicy) SetContentType(contentType string) error { // condition for all incoming uploads. 
func (p *PostPolicy) SetContentLengthRange(min, max int64) error { if min > max { - return ErrInvalidArgument("Minimum limit is larger than maximum limit.") + return errInvalidArgument("Minimum limit is larger than maximum limit.") } if min < 0 { - return ErrInvalidArgument("Minimum limit cannot be negative.") + return errInvalidArgument("Minimum limit cannot be negative.") } if max < 0 { - return ErrInvalidArgument("Maximum limit cannot be negative.") + return errInvalidArgument("Maximum limit cannot be negative.") } p.contentLengthRange.min = min p.contentLengthRange.max = max return nil } +// SetSuccessActionRedirect - Sets the redirect success url of the object for this policy +// based upload. +func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error { + if strings.TrimSpace(redirect) == "" || redirect == "" { + return errInvalidArgument("Redirect is empty") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$success_action_redirect", + value: redirect, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["success_action_redirect"] = redirect + return nil +} + // SetSuccessStatusAction - Sets the status success code of the object for this policy // based upload. func (p *PostPolicy) SetSuccessStatusAction(status string) error { if strings.TrimSpace(status) == "" || status == "" { - return ErrInvalidArgument("Status is empty") + return errInvalidArgument("Status is empty") } policyCond := policyCondition{ matchType: "eq", @@ -188,10 +227,10 @@ func (p *PostPolicy) SetSuccessStatusAction(status string) error { // Can be retrieved through a HEAD request or an event. 
func (p *PostPolicy) SetUserMetadata(key string, value string) error { if strings.TrimSpace(key) == "" || key == "" { - return ErrInvalidArgument("Key is empty") + return errInvalidArgument("Key is empty") } if strings.TrimSpace(value) == "" || value == "" { - return ErrInvalidArgument("Value is empty") + return errInvalidArgument("Value is empty") } headerName := fmt.Sprintf("x-amz-meta-%s", key) policyCond := policyCondition{ @@ -210,10 +249,10 @@ func (p *PostPolicy) SetUserMetadata(key string, value string) error { // Can be retrieved through a HEAD request or an event. func (p *PostPolicy) SetUserData(key string, value string) error { if key == "" { - return ErrInvalidArgument("Key is empty") + return errInvalidArgument("Key is empty") } if value == "" { - return ErrInvalidArgument("Value is empty") + return errInvalidArgument("Value is empty") } headerName := fmt.Sprintf("x-amz-%s", key) policyCond := policyCondition{ @@ -231,18 +270,18 @@ func (p *PostPolicy) SetUserData(key string, value string) error { // addNewPolicy - internal helper to validate adding new policies. func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { - return ErrInvalidArgument("Policy fields are empty.") + return errInvalidArgument("Policy fields are empty.") } p.conditions = append(p.conditions, policyCond) return nil } -// Stringer interface for printing policy in json formatted string. +// String function for printing policy in json formatted string. func (p PostPolicy) String() string { return string(p.marshalJSON()) } -// marshalJSON - Provides Marshalled JSON in bytes. +// marshalJSON - Provides Marshaled JSON in bytes. 
func (p PostPolicy) marshalJSON() []byte { expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"` var conditionsStr string @@ -264,7 +303,7 @@ func (p PostPolicy) marshalJSON() []byte { return []byte(retStr) } -// base64 - Produces base64 of PostPolicy's Marshalled json. +// base64 - Produces base64 of PostPolicy's Marshaled json. func (p PostPolicy) base64() string { return base64.StdEncoding.EncodeToString(p.marshalJSON()) } diff --git a/retry-continous.go b/retry-continous.go index f31dfa6f2..3d25883b0 100644 --- a/retry-continous.go +++ b/retry-continous.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/retry.go b/retry.go index b8e6ffd4a..116c68e8a 100644 --- a/retry.go +++ b/retry.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,10 +18,8 @@ package minio import ( - "net" + "context" "net/http" - "net/url" - "strings" "time" ) @@ -44,7 +42,7 @@ var DefaultRetryCap = time.Second * 30 // newRetryTimer creates a timer with exponentially increasing // delays until the maximum retry attempts are reached. 
-func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int { +func (c Client) newRetryTimer(ctx context.Context, maxRetry int, unit time.Duration, cap time.Duration, jitter float64) <-chan int { attemptCh := make(chan int) // computes the exponential backoff duration according to @@ -73,49 +71,21 @@ func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duratio defer close(attemptCh) for i := 0; i < maxRetry; i++ { select { - // Attempts start from 1. case attemptCh <- i + 1: - case <-doneCh: - // Stop the routine. + case <-ctx.Done(): + return + } + + select { + case <-time.After(exponentialBackoffWait(i)): + case <-ctx.Done(): return } - time.Sleep(exponentialBackoffWait(i)) } }() return attemptCh } -// isHTTPReqErrorRetryable - is http requests error retryable, such -// as i/o timeout, connection broken etc.. -func isHTTPReqErrorRetryable(err error) bool { - if err == nil { - return false - } - switch e := err.(type) { - case *url.Error: - switch e.Err.(type) { - case *net.DNSError, *net.OpError, net.UnknownNetworkError: - return true - } - if strings.Contains(err.Error(), "Connection closed by foreign host") { - return true - } else if strings.Contains(err.Error(), "net/http: TLS handshake timeout") { - // If error is - tlsHandshakeTimeoutError, retry. - return true - } else if strings.Contains(err.Error(), "i/o timeout") { - // If error is - tcp timeoutError, retry. - return true - } else if strings.Contains(err.Error(), "connection timed out") { - // If err is a net.Dial timeout, retry. - return true - } else if strings.Contains(err.Error(), "net/http: HTTP/1.x transport connection broken") { - // If error is transport connection broken, retry. - return true - } - } - return false -} - // List of AWS S3 error codes which are retryable. 
var retryableS3Codes = map[string]struct{}{ "RequestError": {}, @@ -143,6 +113,7 @@ var retryableHTTPStatusCodes = map[int]struct{}{ http.StatusInternalServerError: {}, http.StatusBadGateway: {}, http.StatusServiceUnavailable: {}, + http.StatusGatewayTimeout: {}, // Add more HTTP status codes here. } diff --git a/s3-endpoints.go b/s3-endpoints.go index 0eccd2407..125d86289 100644 --- a/s3-endpoints.go +++ b/s3-endpoints.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -29,11 +29,16 @@ var awsS3EndpointMap = map[string]string{ "eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com", "eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com", "eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com", + "eu-south-1": "s3.dualstack.eu-south-1.amazonaws.com", + "ap-east-1": "s3.dualstack.ap-east-1.amazonaws.com", "ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com", "ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com", "ap-southeast-2": "s3.dualstack.ap-southeast-2.amazonaws.com", "ap-northeast-1": "s3.dualstack.ap-northeast-1.amazonaws.com", "ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com", + "ap-northeast-3": "s3.dualstack.ap-northeast-3.amazonaws.com", + "af-south-1": "s3.dualstack.af-south-1.amazonaws.com", + "me-south-1": "s3.dualstack.me-south-1.amazonaws.com", "sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com", "us-gov-west-1": "s3.dualstack.us-gov-west-1.amazonaws.com", "us-gov-east-1": "s3.dualstack.us-gov-east-1.amazonaws.com", diff --git a/s3-error.go b/s3-error.go index 3b11776c2..f365157ee 100644 --- a/s3-error.go +++ b/s3-error.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. 
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/staticcheck.conf b/staticcheck.conf new file mode 100644 index 000000000..71cc6f536 --- /dev/null +++ b/staticcheck.conf @@ -0,0 +1 @@ +checks = ["all", "-ST1005", "-ST1017", "-SA9004", "-ST1000", "-S1021"] \ No newline at end of file diff --git a/test-utils_test.go b/test-utils_test.go index 6f6443ccf..88b2fac56 100644 --- a/test-utils_test.go +++ b/test-utils_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/testcerts/private.key b/testcerts/private.key new file mode 100644 index 000000000..09448cfb2 --- /dev/null +++ b/testcerts/private.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCwUyKC2VOXy2+8 +gMQkRrDJ4aA7K5pgj6LHWu25GeY93x+8DLFyQ9BhoaMcAbs2Cmw91rONDrZ0gNql +yi5JX8t+iiVH8o6dcq6W8jNLnOw0GMNJ2/E1Ckfe5ktkn9synSSwMdnFp3cDk7Hb +2j6IiWrb+PXb7VGL47kDrG59iKQ350MiB3PNpd1ulHbi2m2ZC3WyoTTzlgeTXiXa +zhBIX4wsGVYs6RzS1bTZFBq05dIPNMJCRDVBSBYAAVuBxKjh4xvhC6j0rTCCK8uJ +752KioW4Y0VAEv6yUC4Ht6D9Jcj7gODTgb2irWSCNXFH+pZaI6wWlS8pPiL6iljY +P3kBeFiLAgMBAAECggEAKM20SM9+FryPSPILcdGiC7XY3JiEix/yLWwPYyxpKZw+ +vce6MJUc3dsH4e1Mo37Z+Z17w4LKGj/PWVpmR7iRYOEbK4EoG6t0V54I3NCdoJiy +aJ8rPHj6lMx6WfjcQuQ2n0eJ+8F7OyqsmBHzMqmKPwln69MJcfPq1rzKfOZoCj9p +0oZ+3Iv3roC4uH8peZFooCDUlzJL+8KiybVlemNfklKsHfRmL2vOdFBt+qvit6N/ +9JgBTX1mRx1+vqECj+TlVP//k3BTEPNfpIvsLCRN0eBbQcXYzu/gZfHwGnsy5Lxy +HaHNJnmLZMWSCc4iyCK7uN/BHXNUSSh3qqp4wqz0IQKBgQDdGbOuOVdJW4J1yYua 
+nDLAu2RQqvZTnoz1/b8jcrjS7tS1la5x1IN0Z9/VqTmkfxyHOK9ab1iVlklkIzjP +CmHnadUwr8vrLcdicFpjVLQU3O4ZqGrgiSGIPAotvOfAOuuzMs+r5ElW/MrGq0Pa +/3tGCTIx8JscZZjGhffUNoIGeQKBgQDMKB+flQB9Ajeo1JM4y3DtHbMJ5D2+/qoe +IkM7kN5K85EEpNwA2PMNKL2qthgM9YFU3K6Dj0gxPNsUKg3W7Ff2r+gaj8K+VjU0 +VbdhTZANbou8hU551swDUCUgquassMtZJIdZnQ7puwLGK67sZwWlOS6Pe1aqaNc5 +nY/MRbemIwKBgEySfykCkNlGCPuUDnZATE91VrudSewRyA3VkGHNdHcQ4bf1m9Gu +YMxqwRl1HxJ6Nz4ZgplWYJ6FyusUS7NgjCGiBIR1DbFoTFoqQROPnUJwdUGLk2Ap +/eP5ryjB+J0ZitGn8kY8rK2kpPGDFN/+hQnvW2PySTXfdbajZP4o1oU5AoGAMiT0 +x3yQlyPRSf2Uf5Gwlf0Ceb5+0Ae6/xXJT7sgbmZuyyY3B1pCMIw+MczyEVTHxHFD +x/qMb9OTt9swdQauAGBqcQO4gImqHcWj+hlT9Yied9qCUPjKOVIZHHH9oJL4D1gi +iodCH3SYlNYr69LOFyv5XLKdsdN4caVaqYDCP+MCgYEAwXyCmSml5oCxJeAOrEDC +Yg3vq3Ul9JO1wc8VDXn9+DtnFsuRHm0fTIxBelmis8AjIIq5DcObpk6wGYZwUiTU +LYQU7v0/Azujv9cl10GI8wzYKiRvExZDTn0sp6OKnau735qBUZvsRDqEQQ5n7waZ +xjlGmZyfah17laYZV9aJoHk= +-----END PRIVATE KEY----- diff --git a/testcerts/public.crt b/testcerts/public.crt new file mode 100644 index 000000000..71f4ccc65 --- /dev/null +++ b/testcerts/public.crt @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEKjCCApKgAwIBAgIRAPVKnAiFmDti207oQPs2VfUwDQYJKoZIhvcNAQELBQAw +VTEeMBwGA1UEChMVbWtjZXJ0IGRldmVsb3BtZW50IENBMRUwEwYDVQQLDAxoYXJz +aGFAcHJ0c2MxHDAaBgNVBAMME21rY2VydCBoYXJzaGFAcHJ0c2MwHhcNMTkwMTA3 +MTE1ODE2WhcNMjkwMTA3MTE1ODE2WjBEMScwJQYDVQQKEx5ta2NlcnQgZGV2ZWxv +cG1lbnQgY2VydGlmaWNhdGUxGTAXBgNVBAsMEGhhcnNoYUBiYWNrc3BhY2UwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCwUyKC2VOXy2+8gMQkRrDJ4aA7 +K5pgj6LHWu25GeY93x+8DLFyQ9BhoaMcAbs2Cmw91rONDrZ0gNqlyi5JX8t+iiVH +8o6dcq6W8jNLnOw0GMNJ2/E1Ckfe5ktkn9synSSwMdnFp3cDk7Hb2j6IiWrb+PXb +7VGL47kDrG59iKQ350MiB3PNpd1ulHbi2m2ZC3WyoTTzlgeTXiXazhBIX4wsGVYs +6RzS1bTZFBq05dIPNMJCRDVBSBYAAVuBxKjh4xvhC6j0rTCCK8uJ752KioW4Y0VA +Ev6yUC4Ht6D9Jcj7gODTgb2irWSCNXFH+pZaI6wWlS8pPiL6iljYP3kBeFiLAgMB +AAGjgYUwgYIwDgYDVR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwG +A1UdEwEB/wQCMAAwHwYDVR0jBBgwFoAUD575sRLoRt9dCxSRqbVctoEHt3MwLAYD 
+VR0RBCUwI4IJbG9jYWxob3N0hwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMA0GCSqG +SIb3DQEBCwUAA4IBgQC7qDRDNAHtfGtQs1UmvqWvHPI7qcBQgAibYq/Fox6X9ia1 +weQBfNWEoNOsk97wzbTz81ifXIQ0oV11kWE8EdsbXOf9xeFe9FmDn10d4bGjuMLd ++N3OtGKxLWry2xDYEsVHJZxVxwrf5GK6AJSJj/S837Nil6uRuwjvBVTbxmh1q0nV +x63V8Ag65rLS0fu8msSb64N5UHMCQk6IE+BFHY2gh0lBfZHMdtP4IbeCm756K78/ +WMeqjavGA3bqzVTixCHnJ9S2VLk/oQUS6mL869jM8+tN5VeE6Qsr1/Q5h+NaFCJg +Ed5xjT9mmnc3BLsOHflb1dg+rA90Zz9wphgebXbJhRNuuDRv81dtRPTzM+evGRGM +iRKtiDpog+K0HulfX2g4ZQ1dItEjYz+JYgUFJG+yCvBlNZ/WsTrIVcUCFKaG5rUC +aNqvKrSXfbzKQx7V/TtUAeSfRk7TBRn5qh8Pl+MmQQsB0L9hwTdnqTNn057tghu4 +3/yIIBpzdWPhQ5uv7Vc= +-----END CERTIFICATE----- diff --git a/transport.go b/transport.go index 88700cfe7..d5ad15b8b 100644 --- a/transport.go +++ b/transport.go @@ -1,8 +1,8 @@ // +build go1.7 go1.8 /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017-2018 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017-2018 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,31 +20,64 @@ package minio import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" "net" "net/http" + "os" "time" ) +// mustGetSystemCertPool - return system CAs or empty pool in case of error (or windows) +func mustGetSystemCertPool() *x509.CertPool { + pool, err := x509.SystemCertPool() + if err != nil { + return x509.NewCertPool() + } + return pool +} + // DefaultTransport - this default transport is similar to // http.DefaultTransport but with additional param DisableCompression // is set to true to avoid decompressing content with 'gzip' encoding. 
-var DefaultTransport http.RoundTripper = &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - MaxIdleConnsPerHost: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - // Set this value so that the underlying transport round-tripper - // doesn't try to auto decode the body of objects with - // content-encoding set to `gzip`. - // - // Refer: - // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843 - DisableCompression: true, +var DefaultTransport = func(secure bool) (*http.Transport, error) { + tr := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + MaxIdleConns: 256, + MaxIdleConnsPerHost: 16, + ResponseHeaderTimeout: time.Minute, + IdleConnTimeout: time.Minute, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 10 * time.Second, + // Set this value so that the underlying transport round-tripper + // doesn't try to auto decode the body of objects with + // content-encoding set to `gzip`. 
+ // + // Refer: + // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843 + DisableCompression: true, + } + + if secure { + tr.TLSClientConfig = &tls.Config{ + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + MinVersion: tls.VersionTLS12, + } + if f := os.Getenv("SSL_CERT_FILE"); f != "" { + rootCAs := mustGetSystemCertPool() + data, err := ioutil.ReadFile(f) + if err == nil { + rootCAs.AppendCertsFromPEM(data) + } + tr.TLSClientConfig.RootCAs = rootCAs + } + } + return tr, nil } diff --git a/utils.go b/utils.go index 8483f3834..f1b711d22 100644 --- a/utils.go +++ b/utils.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -19,22 +19,45 @@ package minio import ( "crypto/md5" - "crypto/sha256" "encoding/base64" "encoding/hex" "encoding/xml" + "fmt" + "hash" "io" "io/ioutil" "net" "net/http" "net/url" "regexp" + "strconv" "strings" + "sync" "time" - "github.com/minio/minio-go/pkg/s3utils" + md5simd "github.com/minio/md5-simd" + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/sha256-simd" ) +func trimEtag(etag string) string { + etag = strings.TrimPrefix(etag, "\"") + return strings.TrimSuffix(etag, "\"") +} + +var expirationRegex = regexp.MustCompile(`expiry-date="(.*?)", rule-id="(.*?)"`) + +func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) { + if matches := expirationRegex.FindStringSubmatch(expiration); len(matches) == 3 { + expTime, err := time.Parse(http.TimeFormat, matches[1]) + if err != nil { + return time.Time{}, "" + } + return expTime, matches[2] + } + return time.Time{}, "" +} + // xmlDecoder provide decoded value in xml. func xmlDecoder(body io.Reader, v interface{}) error { d := xml.NewDecoder(body) @@ -43,14 +66,16 @@ func xmlDecoder(body io.Reader, v interface{}) error { // sum256 calculate sha256sum for an input byte array, returns hex encoded. func sum256Hex(data []byte) string { - hash := sha256.New() + hash := newSHA256Hasher() + defer hash.Close() hash.Write(data) return hex.EncodeToString(hash.Sum(nil)) } // sumMD5Base64 calculate md5sum for an input byte array, returns base64 encoded. func sumMD5Base64(data []byte) string { - hash := md5.New() + hash := newMd5Hasher() + defer hash.Close() hash.Write(data) return base64.StdEncoding.EncodeToString(hash.Sum(nil)) } @@ -64,12 +89,12 @@ func getEndpointURL(endpoint string, secure bool) (*url.URL, error) { } if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) { msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." 
- return nil, ErrInvalidArgument(msg) + return nil, errInvalidArgument(msg) } } else { if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) { msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." - return nil, ErrInvalidArgument(msg) + return nil, errInvalidArgument(msg) } } // If secure is false, use 'http' scheme. @@ -123,19 +148,19 @@ var ( // Verify if input endpoint URL is valid. func isValidEndpointURL(endpointURL url.URL) error { if endpointURL == sentinelURL { - return ErrInvalidArgument("Endpoint url cannot be empty.") + return errInvalidArgument("Endpoint url cannot be empty.") } if endpointURL.Path != "/" && endpointURL.Path != "" { - return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.") + return errInvalidArgument("Endpoint url cannot have fully qualified paths.") } if strings.Contains(endpointURL.Host, ".s3.amazonaws.com") { if !s3utils.IsAmazonEndpoint(endpointURL) { - return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.") + return errInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.") } } if strings.Contains(endpointURL.Host, ".googleapis.com") { if !s3utils.IsGoogleEndpoint(endpointURL) { - return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.") + return errInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.") } } return nil @@ -145,35 +170,188 @@ func isValidEndpointURL(endpointURL url.URL) error { func isValidExpiry(expires time.Duration) error { expireSeconds := int64(expires / time.Second) if expireSeconds < 1 { - return ErrInvalidArgument("Expires cannot be lesser than 1 second.") + return errInvalidArgument("Expires cannot be lesser than 1 second.") } if expireSeconds > 604800 { - return ErrInvalidArgument("Expires cannot be greater than 7 days.") + return errInvalidArgument("Expires cannot be greater than 7 days.") } return nil } -// make a copy of http.Header 
-func cloneHeader(h http.Header) http.Header { - h2 := make(http.Header, len(h)) - for k, vv := range h { - vv2 := make([]string, len(vv)) - copy(vv2, vv) - h2[k] = vv2 +// Extract only necessary metadata header key/values by +// filtering them out with a list of custom header keys. +func extractObjMetadata(header http.Header) http.Header { + preserveKeys := []string{ + "Content-Type", + "Cache-Control", + "Content-Encoding", + "Content-Language", + "Content-Disposition", + "X-Amz-Storage-Class", + "X-Amz-Object-Lock-Mode", + "X-Amz-Object-Lock-Retain-Until-Date", + "X-Amz-Object-Lock-Legal-Hold", + "X-Amz-Website-Redirect-Location", + "X-Amz-Server-Side-Encryption", + "X-Amz-Tagging-Count", + "X-Amz-Meta-", + // Add new headers to be preserved. + // if you add new headers here, please extend + // PutObjectOptions{} to preserve them + // upon upload as well. + } + filteredHeader := make(http.Header) + for k, v := range header { + var found bool + for _, prefix := range preserveKeys { + if !strings.HasPrefix(k, prefix) { + continue + } + found = true + break + } + if found { + filteredHeader[k] = v + } } - return h2 + return filteredHeader } -// Filter relevant response headers from -// the HEAD, GET http response. The function takes -// a list of headers which are filtered out and -// returned as a new http header. -func filterHeader(header http.Header, filterKeys []string) (filteredHeader http.Header) { - filteredHeader = cloneHeader(header) - for _, key := range filterKeys { - filteredHeader.Del(key) +// ToObjectInfo converts http header values into ObjectInfo type, +// extracts metadata and fills in all the necessary fields in ObjectInfo. +func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectInfo, error) { + var err error + // Trim off the odd double quotes from ETag in the beginning and end. 
+ etag := trimEtag(h.Get("ETag")) + + // Parse content length if it exists + var size int64 = -1 + contentLengthStr := h.Get("Content-Length") + if contentLengthStr != "" { + size, err = strconv.ParseInt(contentLengthStr, 10, 64) + if err != nil { + // Content-Length is not valid + return ObjectInfo{}, ErrorResponse{ + Code: "InternalError", + Message: fmt.Sprintf("Content-Length is not an integer, failed with %v", err), + BucketName: bucketName, + Key: objectName, + RequestID: h.Get("x-amz-request-id"), + HostID: h.Get("x-amz-id-2"), + Region: h.Get("x-amz-bucket-region"), + } + } } - return filteredHeader + + // Parse Last-Modified as http time format. + date, err := time.Parse(http.TimeFormat, h.Get("Last-Modified")) + if err != nil { + return ObjectInfo{}, ErrorResponse{ + Code: "InternalError", + Message: fmt.Sprintf("Last-Modified time format is invalid, failed with %v", err), + BucketName: bucketName, + Key: objectName, + RequestID: h.Get("x-amz-request-id"), + HostID: h.Get("x-amz-id-2"), + Region: h.Get("x-amz-bucket-region"), + } + } + + // Fetch content type if any present. 
+ contentType := strings.TrimSpace(h.Get("Content-Type")) + if contentType == "" { + contentType = "application/octet-stream" + } + + expiryStr := h.Get("Expires") + var expiry time.Time + if expiryStr != "" { + expiry, _ = time.Parse(http.TimeFormat, expiryStr) + } + + metadata := extractObjMetadata(h) + userMetadata := make(map[string]string) + for k, v := range metadata { + if strings.HasPrefix(k, "X-Amz-Meta-") { + userMetadata[strings.TrimPrefix(k, "X-Amz-Meta-")] = v[0] + } + } + userTags := s3utils.TagDecode(h.Get(amzTaggingHeader)) + + var tagCount int + if count := h.Get(amzTaggingCount); count != "" { + tagCount, err = strconv.Atoi(count) + if err != nil { + return ObjectInfo{}, ErrorResponse{ + Code: "InternalError", + Message: fmt.Sprintf("x-amz-tagging-count is not an integer, failed with %v", err), + BucketName: bucketName, + Key: objectName, + RequestID: h.Get("x-amz-request-id"), + HostID: h.Get("x-amz-id-2"), + Region: h.Get("x-amz-bucket-region"), + } + } + } + + // extract lifecycle expiry date and rule ID + expTime, ruleID := amzExpirationToExpiryDateRuleID(h.Get(amzExpiration)) + + // Save object metadata info. + return ObjectInfo{ + ETag: etag, + Key: objectName, + Size: size, + LastModified: date, + ContentType: contentType, + Expires: expiry, + VersionID: h.Get(amzVersionID), + ReplicationStatus: h.Get(amzReplicationStatus), + Expiration: expTime, + ExpirationRuleID: ruleID, + // Extract only the relevant header keys describing the object. + // following function filters out a list of standard set of keys + // which are not part of object metadata. + Metadata: metadata, + UserMetadata: userMetadata, + UserTags: userTags, + UserTagCount: tagCount, + }, nil +} + +var readFull = func(r io.Reader, buf []byte) (n int, err error) { + // ReadFull reads exactly len(buf) bytes from r into buf. + // It returns the number of bytes copied and an error if + // fewer bytes were read. The error is EOF only if no bytes + // were read. 
If an EOF happens after reading some but not + // all the bytes, ReadFull returns ErrUnexpectedEOF. + // On return, n == len(buf) if and only if err == nil. + // If r returns an error having read at least len(buf) bytes, + // the error is dropped. + for n < len(buf) && err == nil { + var nn int + nn, err = r.Read(buf[n:]) + // Some spurious io.Reader's return + // io.ErrUnexpectedEOF when nn == 0 + // this behavior is undocumented + // so we are on purpose not using io.ReadFull + // implementation because this can lead + // to custom handling, to avoid that + // we simply modify the original io.ReadFull + // implementation to avoid this issue. + // io.ErrUnexpectedEOF with nn == 0 really + // means that io.EOF + if err == io.ErrUnexpectedEOF && nn == 0 { + err = io.EOF + } + n += nn + } + if n >= len(buf) { + err = nil + } else if n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return } // regCred matches credential string in HTTP header @@ -223,13 +401,17 @@ var supportedHeaders = []string{ "content-disposition", "content-language", "x-amz-website-redirect-location", + "x-amz-object-lock-mode", + "x-amz-metadata-directive", + "x-amz-object-lock-retain-until-date", "expires", + "x-amz-replication-status", // Add more supported headers here. 
} // isStorageClassHeader returns true if the header is a supported storage class header func isStorageClassHeader(headerKey string) bool { - return strings.ToLower(amzStorageClass) == strings.ToLower(headerKey) + return strings.EqualFold(amzStorageClass, headerKey) } // isStandardHeader returns true if header is a supported header and not a custom header @@ -270,3 +452,34 @@ func isAmzHeader(headerKey string) bool { return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) } + +var md5Pool = sync.Pool{New: func() interface{} { return md5.New() }} +var sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }} + +func newMd5Hasher() md5simd.Hasher { + return hashWrapper{Hash: md5Pool.New().(hash.Hash), isMD5: true} +} + +func newSHA256Hasher() md5simd.Hasher { + return hashWrapper{Hash: sha256Pool.New().(hash.Hash), isSHA256: true} +} + +// hashWrapper implements the md5simd.Hasher interface. +type hashWrapper struct { + hash.Hash + isMD5 bool + isSHA256 bool +} + +// Close will put the hasher back into the pool. +func (m hashWrapper) Close() { + if m.isMD5 && m.Hash != nil { + m.Reset() + md5Pool.Put(m.Hash) + } + if m.isSHA256 && m.Hash != nil { + m.Reset() + sha256Pool.Put(m.Hash) + } + m.Hash = nil +} diff --git a/utils_test.go b/utils_test.go index 2e60f77a2..e8628d229 100644 --- a/utils_test.go +++ b/utils_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -19,12 +19,11 @@ package minio import ( "fmt" - "net/http" "net/url" "testing" "time" - "github.com/minio/minio-go/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/s3utils" ) // Tests signature redacting function used @@ -52,21 +51,6 @@ func TestRedactSignature(t *testing.T) { } } -// Tests filter header function by filtering out -// some custom header keys. -func TestFilterHeader(t *testing.T) { - header := http.Header{} - header.Set("Content-Type", "binary/octet-stream") - header.Set("Content-Encoding", "gzip") - newHeader := filterHeader(header, []string{"Content-Type"}) - if len(newHeader) > 1 { - t.Fatalf("Unexpected size of the returned header, should be 1, got %d", len(newHeader)) - } - if newHeader.Get("Content-Encoding") != "gzip" { - t.Fatalf("Unexpected content-encoding value, expected 'gzip', got %s", newHeader.Get("Content-Encoding")) - } -} - // Tests for 'getEndpointURL(endpoint string, inSecure bool)'. func TestGetEndpointURL(t *testing.T) { testCases := []struct { @@ -89,11 +73,11 @@ func TestGetEndpointURL(t *testing.T) { {"192.168.1.1:9000", false, "http://192.168.1.1:9000", nil, true}, {"192.168.1.1:9000", true, "https://192.168.1.1:9000", nil, true}, {"s3.amazonaws.com:443", true, "https://s3.amazonaws.com:443", nil, true}, - {"13333.123123.-", true, "", ErrInvalidArgument(fmt.Sprintf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-")), false}, - {"13333.123123.-", true, "", ErrInvalidArgument(fmt.Sprintf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-")), false}, - {"storage.googleapis.com:4000", true, "", ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false}, - {"s3.aamzza.-", true, "", ErrInvalidArgument(fmt.Sprintf("Endpoint: %s does not follow ip address or domain name standards.", "s3.aamzza.-")), false}, - {"", true, "", ErrInvalidArgument("Endpoint: does not follow ip address or domain name standards."), false}, + 
{"13333.123123.-", true, "", errInvalidArgument(fmt.Sprintf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-")), false}, + {"13333.123123.-", true, "", errInvalidArgument(fmt.Sprintf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-")), false}, + {"storage.googleapis.com:4000", true, "", errInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false}, + {"s3.aamzza.-", true, "", errInvalidArgument(fmt.Sprintf("Endpoint: %s does not follow ip address or domain name standards.", "s3.aamzza.-")), false}, + {"", true, "", errInvalidArgument("Endpoint: does not follow ip address or domain name standards."), false}, } for i, testCase := range testCases { @@ -128,7 +112,7 @@ func TestIsValidEndpointURL(t *testing.T) { // Flag indicating whether the test is expected to pass or not. shouldPass bool }{ - {"", ErrInvalidArgument("Endpoint url cannot be empty."), false}, + {"", errInvalidArgument("Endpoint url cannot be empty."), false}, {"/", nil, true}, {"https://s3.amazonaws.com", nil, true}, {"https://s3.cn-north-1.amazonaws.com.cn", nil, true}, @@ -138,10 +122,10 @@ func TestIsValidEndpointURL(t *testing.T) { {"https://storage.googleapis.com/", nil, true}, {"https://z3.amazonaws.com", nil, true}, {"https://mybalancer.us-east-1.elb.amazonaws.com", nil, true}, - {"192.168.1.1", ErrInvalidArgument("Endpoint url cannot have fully qualified paths."), false}, - {"https://amazon.googleapis.com/", ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false}, - {"https://storage.googleapis.com/bucket/", ErrInvalidArgument("Endpoint url cannot have fully qualified paths."), false}, - {"https://s3.amazonaws.com/bucket/object", ErrInvalidArgument("Endpoint url cannot have fully qualified paths."), false}, + {"192.168.1.1", errInvalidArgument("Endpoint url cannot have fully qualified paths."), false}, + {"https://amazon.googleapis.com/", 
errInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false}, + {"https://storage.googleapis.com/bucket/", errInvalidArgument("Endpoint url cannot have fully qualified paths."), false}, + {"https://s3.amazonaws.com/bucket/object", errInvalidArgument("Endpoint url cannot have fully qualified paths."), false}, } for i, testCase := range testCases { @@ -234,9 +218,9 @@ func TestIsValidExpiry(t *testing.T) { // Flag to indicate whether the test should pass. shouldPass bool }{ - {100 * time.Millisecond, ErrInvalidArgument("Expires cannot be lesser than 1 second."), false}, - {604801 * time.Second, ErrInvalidArgument("Expires cannot be greater than 7 days."), false}, - {0 * time.Second, ErrInvalidArgument("Expires cannot be lesser than 1 second."), false}, + {100 * time.Millisecond, errInvalidArgument("Expires cannot be lesser than 1 second."), false}, + {604801 * time.Second, errInvalidArgument("Expires cannot be greater than 7 days."), false}, + {0 * time.Second, errInvalidArgument("Expires cannot be lesser than 1 second."), false}, {1 * time.Second, nil, true}, {10000 * time.Second, nil, true}, {999 * time.Second, nil, true}, @@ -270,12 +254,12 @@ func TestIsValidBucketName(t *testing.T) { // Flag to indicate whether test should Pass. 
shouldPass bool }{ - {".mybucket", ErrInvalidBucketName("Bucket name contains invalid characters"), false}, - {"mybucket.", ErrInvalidBucketName("Bucket name contains invalid characters"), false}, - {"mybucket-", ErrInvalidBucketName("Bucket name contains invalid characters"), false}, - {"my", ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters"), false}, - {"", ErrInvalidBucketName("Bucket name cannot be empty"), false}, - {"my..bucket", ErrInvalidBucketName("Bucket name contains invalid characters"), false}, + {".mybucket", errInvalidBucketName("Bucket name contains invalid characters"), false}, + {"mybucket.", errInvalidBucketName("Bucket name contains invalid characters"), false}, + {"mybucket-", errInvalidBucketName("Bucket name contains invalid characters"), false}, + {"my", errInvalidBucketName("Bucket name cannot be shorter than 3 characters"), false}, + {"", errInvalidBucketName("Bucket name cannot be empty"), false}, + {"my..bucket", errInvalidBucketName("Bucket name contains invalid characters"), false}, {"my.bucket.com", nil, true}, {"my-bucket", nil, true}, {"123my-bucket", nil, true},