diff options
Diffstat (limited to 'vendor/github.com/minio')
53 files changed, 1142 insertions, 307 deletions
diff --git a/vendor/github.com/minio/minio-go/v7/.golangci.yml b/vendor/github.com/minio/minio-go/v7/.golangci.yml index 7d1dd335..dfc0c2d5 100644 --- a/vendor/github.com/minio/minio-go/v7/.golangci.yml +++ b/vendor/github.com/minio/minio-go/v7/.golangci.yml @@ -8,9 +8,20 @@ linters: - typecheck - goimports - misspell + - revive - govet - - golint - ineffassign - gosimple - deadcode - structcheck + - gocritic + +issues: + exclude-use-default: false + exclude: + # todo fix these when we get enough time. + - "singleCaseSwitch: should rewrite switch statement to if statement" + - "unlambda: replace" + - "captLocal:" + - "ifElseChain:" + - "elseif:" diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile index ae640c4d..e7a3d758 100644 --- a/vendor/github.com/minio/minio-go/v7/Makefile +++ b/vendor/github.com/minio/minio-go/v7/Makefile @@ -9,7 +9,7 @@ checks: lint vet test examples functional-test lint: @mkdir -p ${GOPATH}/bin - @which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0) + @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.40.1 @echo "Running $@ check" @GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean @GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go index e02ab84a..24f94e03 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go @@ -28,7 +28,7 @@ import ( ) // SetBucketEncryption sets the default encryption configuration on an existing bucket. -func (c Client) SetBucketEncryption(ctx context.Context, bucketName string, config *sse.Configuration) error { +func (c *Client) SetBucketEncryption(ctx context.Context, bucketName string, config *sse.Configuration) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -70,7 +70,7 @@ func (c Client) SetBucketEncryption(ctx context.Context, bucketName string, conf } // RemoveBucketEncryption removes the default encryption configuration on a bucket with a context to control cancellations and timeouts. -func (c Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error { +func (c *Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -99,7 +99,7 @@ func (c Client) RemoveBucketEncryption(ctx context.Context, bucketName string) e // GetBucketEncryption gets the default encryption configuration // on an existing bucket with a context to control cancellations and timeouts. -func (c Client) GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) { +func (c *Client) GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) { // Input validation. 
if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go index e1fac813..7e219973 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go @@ -30,7 +30,7 @@ import ( ) // SetBucketLifecycle set the lifecycle on an existing bucket. -func (c Client) SetBucketLifecycle(ctx context.Context, bucketName string, config *lifecycle.Configuration) error { +func (c *Client) SetBucketLifecycle(ctx context.Context, bucketName string, config *lifecycle.Configuration) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -51,7 +51,7 @@ func (c Client) SetBucketLifecycle(ctx context.Context, bucketName string, confi } // Saves a new bucket lifecycle. -func (c Client) putBucketLifecycle(ctx context.Context, bucketName string, buf []byte) error { +func (c *Client) putBucketLifecycle(ctx context.Context, bucketName string, buf []byte) error { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) @@ -81,7 +81,7 @@ func (c Client) putBucketLifecycle(ctx context.Context, bucketName string, buf [ } // Remove lifecycle from a bucket. -func (c Client) removeBucketLifecycle(ctx context.Context, bucketName string) error { +func (c *Client) removeBucketLifecycle(ctx context.Context, bucketName string) error { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) @@ -101,7 +101,7 @@ func (c Client) removeBucketLifecycle(ctx context.Context, bucketName string) er } // GetBucketLifecycle fetch bucket lifecycle configuration -func (c Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) { +func (c *Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err @@ -120,7 +120,7 @@ func (c Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lif } // Request server for current bucket lifecycle. -func (c Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, error) { +func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, error) { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go index 76787eca..1e6f3da1 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go @@ -32,7 +32,7 @@ import ( ) // SetBucketNotification saves a new bucket notification with a context to control cancellations and timeouts. -func (c Client) SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error { +func (c *Client) SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error { // Input validation. 
if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -73,12 +73,12 @@ func (c Client) SetBucketNotification(ctx context.Context, bucketName string, co } // RemoveAllBucketNotification - Remove bucket notification clears all previously specified config -func (c Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error { +func (c *Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error { return c.SetBucketNotification(ctx, bucketName, notification.Configuration{}) } // GetBucketNotification returns current bucket notification configuration -func (c Client) GetBucketNotification(ctx context.Context, bucketName string) (bucketNotification notification.Configuration, err error) { +func (c *Client) GetBucketNotification(ctx context.Context, bucketName string) (bucketNotification notification.Configuration, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return notification.Configuration{}, err @@ -87,7 +87,7 @@ func (c Client) GetBucketNotification(ctx context.Context, bucketName string) (b } // Request server for notification rules. -func (c Client) getBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) { +func (c *Client) getBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) { urlValues := make(url.Values) urlValues.Set("notification", "") @@ -121,12 +121,12 @@ func processBucketNotificationResponse(bucketName string, resp *http.Response) ( } // ListenNotification listen for all events, this is a MinIO specific API -func (c Client) ListenNotification(ctx context.Context, prefix, suffix string, events []string) <-chan notification.Info { +func (c *Client) ListenNotification(ctx context.Context, prefix, suffix string, events []string) <-chan notification.Info { return c.ListenBucketNotification(ctx, "", prefix, suffix, events) } // ListenBucketNotification listen for bucket events, this is a MinIO specific API -func (c Client) ListenBucketNotification(ctx context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info { +func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info { notificationInfoCh := make(chan notification.Info, 1) const notificationCapacity = 4 * 1024 * 1024 notificationEventBuffer := make([]byte, notificationCapacity) diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go index 7e01275d..57cdbc6e 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go @@ -27,7 +27,7 @@ import ( ) // SetBucketPolicy sets the access permissions on an existing bucket. -func (c Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) error { +func (c *Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -43,7 +43,7 @@ func (c Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) } // Saves a new bucket policy. -func (c Client) putBucketPolicy(ctx context.Context, bucketName, policy string) error { +func (c *Client) putBucketPolicy(ctx context.Context, bucketName, policy string) error { // Get resources properly escaped and lined up before // using them in http request. 
urlValues := make(url.Values) @@ -71,7 +71,7 @@ func (c Client) putBucketPolicy(ctx context.Context, bucketName, policy string) } // Removes all policies on a bucket. -func (c Client) removeBucketPolicy(ctx context.Context, bucketName string) error { +func (c *Client) removeBucketPolicy(ctx context.Context, bucketName string) error { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) @@ -91,7 +91,7 @@ func (c Client) removeBucketPolicy(ctx context.Context, bucketName string) error } // GetBucketPolicy returns the current policy -func (c Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) { +func (c *Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", err @@ -108,7 +108,7 @@ func (c Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, } // Request server for current bucket policy. -func (c Client) getBucketPolicy(ctx context.Context, bucketName string) (string, error) { +func (c *Client) getBucketPolicy(ctx context.Context, bucketName string) (string, error) { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go index 0b357d3e..b39211ec 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go @@ -33,12 +33,12 @@ import ( ) // RemoveBucketReplication removes a replication config on an existing bucket. -func (c Client) RemoveBucketReplication(ctx context.Context, bucketName string) error { +func (c *Client) RemoveBucketReplication(ctx context.Context, bucketName string) error { return c.removeBucketReplication(ctx, bucketName) } // SetBucketReplication sets a replication config on an existing bucket. -func (c Client) SetBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { +func (c *Client) SetBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -53,7 +53,7 @@ func (c Client) SetBucketReplication(ctx context.Context, bucketName string, cfg } // Saves a new bucket replication. -func (c Client) putBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { +func (c *Client) putBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) @@ -86,7 +86,7 @@ func (c Client) putBucketReplication(ctx context.Context, bucketName string, cfg } // Remove replication from a bucket. -func (c Client) removeBucketReplication(ctx context.Context, bucketName string) error { +func (c *Client) removeBucketReplication(ctx context.Context, bucketName string) error { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) @@ -107,7 +107,7 @@ func (c Client) removeBucketReplication(ctx context.Context, bucketName string) // GetBucketReplication fetches bucket replication configuration.If config is not // found, returns empty config with nil error. 
-func (c Client) GetBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) { +func (c *Client) GetBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return cfg, err @@ -124,7 +124,7 @@ func (c Client) GetBucketReplication(ctx context.Context, bucketName string) (cf } // Request server for current bucket replication config. -func (c Client) getBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) { +func (c *Client) getBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) @@ -153,7 +153,7 @@ func (c Client) getBucketReplication(ctx context.Context, bucketName string) (cf } // GetBucketReplicationMetrics fetches bucket replication status metrics -func (c Client) GetBucketReplicationMetrics(ctx context.Context, bucketName string) (s replication.Metrics, err error) { +func (c *Client) GetBucketReplicationMetrics(ctx context.Context, bucketName string) (s replication.Metrics, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return s, err @@ -199,7 +199,7 @@ func mustGetUUID() string { // ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication // is enabled in the replication config -func (c Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (rID string, err error) { +func (c *Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (rID string, err error) { rID = mustGetUUID() _, err = c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, "", rID) if err != nil { @@ -208,16 +208,15 @@ func (c Client) ResetBucketReplication(ctx context.Context, bucketName string, o return rID, nil } -// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication -// is enabled in the replication config -func (c Client) ResetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string) (rinfo replication.ResyncTargetsInfo, err error) { - rID := mustGetUUID() - return c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, tgtArn, rID) +// ResetBucketReplicationOnTarget kicks off replication of previously replicated objects if +// ExistingObjectReplication is enabled in the replication config +func (c *Client) ResetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string) (replication.ResyncTargetsInfo, error) { + return c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, tgtArn, mustGetUUID()) } // ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication // is enabled in the replication config -func (c Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string, resetID string) (rinfo replication.ResyncTargetsInfo, err error) { +func (c *Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string, resetID string) (rinfo replication.ResyncTargetsInfo, err error) { // Input validation. 
if err = s3utils.CheckValidBucketName(bucketName); err != nil { return diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go index fcb966e6..1615f8f8 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go @@ -32,7 +32,7 @@ import ( // GetBucketTagging fetch tagging configuration for a bucket with a // context to control cancellations and timeouts. -func (c Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) { +func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err @@ -64,7 +64,7 @@ func (c Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags. // SetBucketTagging sets tagging configuration for a bucket // with a context to control cancellations and timeouts. -func (c Client) SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error { +func (c *Client) SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -107,7 +107,7 @@ func (c Client) SetBucketTagging(ctx context.Context, bucketName string, tags *t // RemoveBucketTagging removes tagging configuration for a // bucket with a context to control cancellations and timeouts. -func (c Client) RemoveBucketTagging(ctx context.Context, bucketName string) error { +func (c *Client) RemoveBucketTagging(ctx context.Context, bucketName string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go index e3ceeb33..930b1b93 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go @@ -27,7 +27,7 @@ import ( ) // SetBucketVersioning sets a bucket versioning configuration -func (c Client) SetBucketVersioning(ctx context.Context, bucketName string, config BucketVersioningConfiguration) error { +func (c *Client) SetBucketVersioning(ctx context.Context, bucketName string, config BucketVersioningConfiguration) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -67,12 +67,12 @@ func (c Client) SetBucketVersioning(ctx context.Context, bucketName string, conf } // EnableVersioning - enable object versioning in given bucket. -func (c Client) EnableVersioning(ctx context.Context, bucketName string) error { +func (c *Client) EnableVersioning(ctx context.Context, bucketName string) error { return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Enabled"}) } // SuspendVersioning - suspend object versioning in given bucket. -func (c Client) SuspendVersioning(ctx context.Context, bucketName string) error { +func (c *Client) SuspendVersioning(ctx context.Context, bucketName string) error { return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Suspended"}) } @@ -102,7 +102,7 @@ func (b BucketVersioningConfiguration) Suspended() bool { // GetBucketVersioning gets the versioning configuration on // an existing bucket with a context to control cancellations and timeouts. 
-func (c Client) GetBucketVersioning(ctx context.Context, bucketName string) (BucketVersioningConfiguration, error) { +func (c *Client) GetBucketVersioning(ctx context.Context, bucketName string) (BucketVersioningConfiguration, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return BucketVersioningConfiguration{}, err diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go index 19a72ac3..f349f99e 100644 --- a/vendor/github.com/minio/minio-go/v7/api-compose-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go @@ -201,7 +201,7 @@ func (opts CopySrcOptions) validate() (err error) { } // Low level implementation of CopyObject API, supports only upto 5GiB worth of copy. -func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, +func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions) (ObjectInfo, error) { // Build headers. @@ -243,8 +243,10 @@ func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuck customHeader: headers, } if dstOpts.Internal.SourceVersionID != "" { - if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil { - return ObjectInfo{}, errInvalidArgument(err.Error()) + if dstOpts.Internal.SourceVersionID != nullVersionID { + if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil { + return ObjectInfo{}, errInvalidArgument(err.Error()) + } } urlValues := make(url.Values) urlValues.Set("versionId", dstOpts.Internal.SourceVersionID) @@ -282,7 +284,7 @@ func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuck return objInfo, nil } -func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, +func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) { headers := make(http.Header) @@ -335,7 +337,7 @@ func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, dest // uploadPartCopy - helper function to create a part in a multipart // upload via an upload-part-copy request // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html -func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int, +func (c *Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int, headers http.Header) (p CompletePart, err error) { // Build query parameters @@ -375,7 +377,7 @@ func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID str // and concatenates them into a new object using only server-side copying // operations. Optionally takes progress reader hook for applications to // look at current progress. 
-func (c Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) { +func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) { if len(srcs) < 1 || len(srcs) > maxPartsCount { return UploadInfo{}, errInvalidArgument("There must be as least one and up to 10000 source objects.") } @@ -396,7 +398,7 @@ func (c Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ... var err error for i, src := range srcs { opts := StatObjectOptions{ServerSideEncryption: encrypt.SSE(src.Encryption), VersionID: src.VersionID} - srcObjectInfos[i], err = c.statObject(context.Background(), src.Bucket, src.Object, opts) + srcObjectInfos[i], err = c.StatObject(context.Background(), src.Bucket, src.Object, opts) if err != nil { return UploadInfo{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/api-copy-object.go b/vendor/github.com/minio/minio-go/v7/api-copy-object.go index 9af036ec..1c0ad2be 100644 --- a/vendor/github.com/minio/minio-go/v7/api-copy-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-copy-object.go @@ -25,7 +25,7 @@ import ( ) // CopyObject - copy a source object into a new object -func (c Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySrcOptions) (UploadInfo, error) { +func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySrcOptions) (UploadInfo, error) { if err := src.validate(); err != nil { return UploadInfo{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go index c45c4fdc..39df7eec 100644 --- a/vendor/github.com/minio/minio-go/v7/api-error-response.go +++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go @@ -18,8 +18,11 @@ package minio import ( + "bytes" "encoding/xml" "fmt" + "io" + "io/ioutil" "net/http" ) @@ -98,6 +101,19 @@ const ( reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues." ) +// xmlDecodeAndBody reads the whole body up to 1MB and +// tries to XML decode it into v. +// The body that was read and any error from reading or decoding is returned. +func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) { + // read the whole body (up to 1MB) + const maxBodyLength = 1 << 20 + body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength)) + if err != nil { + return nil, err + } + return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v) +} + // httpRespToErrorResponse returns a new encoded ErrorResponse // structure as error. func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error { @@ -111,7 +127,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) Server: resp.Header.Get("Server"), } - err := xmlDecoder(resp.Body, &errResp) + errBody, err := xmlDecodeAndBody(resp.Body, &errResp) // Xml decoding failed with no body, fall back to HTTP headers. if err != nil { switch resp.StatusCode { @@ -156,10 +172,17 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) Key: objectName, } default: + msg := resp.Status + if len(errBody) > 0 { + msg = string(errBody) + if len(msg) > 1024 { + msg = msg[:1024] + "..." 
+ } + } errResp = ErrorResponse{ StatusCode: resp.StatusCode, Code: resp.Status, - Message: resp.Status, + Message: msg, BucketName: bucketName, } } diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go index 031aa32e..b1291b6b 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go @@ -52,7 +52,7 @@ type accessControlPolicy struct { } // GetObjectACL get object ACLs -func (c Client) GetObjectACL(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) { +func (c *Client) GetObjectACL(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) { resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ bucketName: bucketName, objectName: objectName, @@ -75,7 +75,7 @@ func (c Client) GetObjectACL(ctx context.Context, bucketName, objectName string) return nil, err } - objInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions{}) + objInfo, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions{}) if err != nil { return nil, err } diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-file.go b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go index bccff457..98f5acf6 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-object-file.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go @@ -28,7 +28,7 @@ import ( // FGetObject - download contents of an object to a local file. // The options can be used to specify the GET request further. -func (c Client) FGetObject(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { +func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go index ef9dd45d..b9b96025 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go @@ -30,7 +30,7 @@ import ( ) // GetObject wrapper function that accepts a request context -func (c Client) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { +func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err @@ -139,7 +139,7 @@ func (c Client) GetObject(ctx context.Context, bucketName, objectName string, op // Remove range header if already set, for stat Operations to get original file size. 
delete(opts.headers, "Range") - objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions(opts)) + objectInfo, err = c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts)) if err != nil { resCh <- getResponse{ Error: err, @@ -162,7 +162,7 @@ func (c Client) GetObject(ctx context.Context, bucketName, objectName string, op if etag != "" && !snowball { opts.SetMatchETag(etag) } - objectInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions(opts)) + objectInfo, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts)) if err != nil { resCh <- getResponse{ Error: err, @@ -639,7 +639,7 @@ func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- // // For more information about the HTTP Range header. // go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. -func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { +func (c *Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { // Validate input arguments. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, ObjectInfo{}, nil, err diff --git a/vendor/github.com/minio/minio-go/v7/api-get-options.go b/vendor/github.com/minio/minio-go/v7/api-get-options.go index 9e0cb214..0be858d1 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-options.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-options.go @@ -25,7 +25,7 @@ import ( "github.com/minio/minio-go/v7/pkg/encrypt" ) -//AdvancedGetOptions for internal use by MinIO server - not intended for client use. +// AdvancedGetOptions for internal use by MinIO server - not intended for client use. type AdvancedGetOptions struct { ReplicationDeleteMarker bool ReplicationProxyRequest string diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go index 431bae54..85209b5d 100644 --- a/vendor/github.com/minio/minio-go/v7/api-list.go +++ b/vendor/github.com/minio/minio-go/v7/api-list.go @@ -36,7 +36,7 @@ import ( // fmt.Println(message) // } // -func (c Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) { +func (c *Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) { // Execute GET on service. resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex}) defer closeResponse(resp) @@ -56,8 +56,8 @@ func (c Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) { return listAllMyBucketsResult.Buckets.Bucket, nil } -/// Bucket List Operations. -func (c Client) listObjectsV2(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { +// Bucket List Operations. +func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { // Allocate new list objects channel. objectStatCh := make(chan ObjectInfo, 1) // Default listing is delimited at "/" @@ -153,7 +153,7 @@ func (c Client) listObjectsV2(ctx context.Context, bucketName string, opts ListO // ?delimiter - A delimiter is a character you use to group keys. // ?start-after - Sets a marker to start listing lexically at this key onwards. // ?max-keys - Sets the maximum number of keys returned in the response body. 
-func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, startAfter string, maxkeys int, headers http.Header) (ListBucketV2Result, error) { +func (c *Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, startAfter string, maxkeys int, headers http.Header) (ListBucketV2Result, error) { // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ListBucketV2Result{}, err @@ -252,7 +252,7 @@ func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix return listBucketResult, nil } -func (c Client) listObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { +func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { // Allocate new list objects channel. objectStatCh := make(chan ObjectInfo, 1) // Default listing is delimited at "/" @@ -332,7 +332,7 @@ func (c Client) listObjects(ctx context.Context, bucketName string, opts ListObj return objectStatCh } -func (c Client) listObjectVersions(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { +func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { // Allocate new list objects channel. resultCh := make(chan ObjectInfo, 1) // Default listing is delimited at "/" @@ -443,7 +443,7 @@ func (c Client) listObjectVersions(ctx context.Context, bucketName string, opts // ?delimiter - A delimiter is a character you use to group keys. // ?prefix - Limits the response to keys that begin with the specified prefix. // ?max-keys - Sets the maximum number of keys returned in the response body. -func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, keyMarker, versionIDMarker, delimiter string, maxkeys int, headers http.Header) (ListVersionsResult, error) { +func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, keyMarker, versionIDMarker, delimiter string, maxkeys int, headers http.Header) (ListVersionsResult, error) { // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ListVersionsResult{}, err @@ -540,7 +540,7 @@ func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, // ?delimiter - A delimiter is a character you use to group keys. // ?prefix - Limits the response to keys that begin with the specified prefix. // ?max-keys - Sets the maximum number of keys returned in the response body. -func (c Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int, headers http.Header) (ListBucketResult, error) { +func (c *Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int, headers http.Header) (ListBucketResult, error) { // Validate bucket name. 
if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ListBucketResult{}, err @@ -661,7 +661,7 @@ func (o *ListObjectsOptions) Set(key, value string) { // fmt.Println(object) // } // -func (c Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { +func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { if opts.WithVersions { return c.listObjectVersions(ctx, bucketName, opts) } @@ -697,12 +697,12 @@ func (c Client) ListObjects(ctx context.Context, bucketName string, opts ListObj // for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) { // fmt.Println(message) // } -func (c Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo { +func (c *Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo { return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive) } // listIncompleteUploads lists all incomplete uploads. -func (c Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo { +func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo { // Allocate channel for multipart uploads. objectMultipartStatCh := make(chan ObjectMultipartInfo, 1) // Delimiter is set to "/" by default. @@ -788,7 +788,7 @@ func (c Client) listIncompleteUploads(ctx context.Context, bucketName, objectPre // ?delimiter - A delimiter is a character you use to group keys. // ?prefix - Limits the response to keys that begin with the specified prefix. // ?max-uploads - Sets the maximum number of multipart uploads returned in the response body. -func (c Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) { +func (c *Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) { // Get resources properly escaped and lined up before using them in http request. urlValues := make(url.Values) // Set uploads. @@ -867,7 +867,7 @@ func (c Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMa } // listObjectParts list all object parts recursively. -func (c Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) { +func (c *Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) { // Part number marker for the next batch of request. var nextPartNumberMarker int partsInfo = make(map[int]ObjectPart) @@ -896,7 +896,7 @@ func (c Client) listObjectParts(ctx context.Context, bucketName, objectName, upl } // findUploadIDs lists all incomplete uploads and find the uploadIDs of the matching object name. -func (c Client) findUploadIDs(ctx context.Context, bucketName, objectName string) ([]string, error) { +func (c *Client) findUploadIDs(ctx context.Context, bucketName, objectName string) ([]string, error) { var uploadIDs []string // Make list incomplete uploads recursive. 
isRecursive := true @@ -923,7 +923,7 @@ func (c Client) findUploadIDs(ctx context.Context, bucketName, objectName string // ?part-number-marker - Specifies the part after which listing should // begin. // ?max-parts - Maximum parts to be listed per request. -func (c Client) listObjectPartsQuery(ctx context.Context, bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) { +func (c *Client) listObjectPartsQuery(ctx context.Context, bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) { // Get resources properly escaped and lined up before using them in http request. urlValues := make(url.Values) // Set part number marker. diff --git a/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go index b139c168..0c027d55 100644 --- a/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go +++ b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go @@ -81,7 +81,7 @@ func newObjectLegalHold(status *LegalHoldStatus) (*objectLegalHold, error) { } // PutObjectLegalHold : sets object legal hold for a given object and versionID. -func (c Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts PutObjectLegalHoldOptions) error { +func (c *Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts PutObjectLegalHoldOptions) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -135,7 +135,7 @@ func (c Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName s } // GetObjectLegalHold gets legal-hold status of given object. -func (c Client) GetObjectLegalHold(ctx context.Context, bucketName, objectName string, opts GetObjectLegalHoldOptions) (status *LegalHoldStatus, err error) { +func (c *Client) GetObjectLegalHold(ctx context.Context, bucketName, objectName string, opts GetObjectLegalHoldOptions) (status *LegalHoldStatus, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err diff --git a/vendor/github.com/minio/minio-go/v7/api-object-lock.go b/vendor/github.com/minio/minio-go/v7/api-object-lock.go index 29f52b05..f0a43985 100644 --- a/vendor/github.com/minio/minio-go/v7/api-object-lock.go +++ b/vendor/github.com/minio/minio-go/v7/api-object-lock.go @@ -139,7 +139,7 @@ func newObjectLockConfig(mode *RetentionMode, validity *uint, unit *ValidityUnit } // SetBucketObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. -func (c Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { +func (c *Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -184,7 +184,7 @@ func (c Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string } // GetObjectLockConfig gets object lock configuration of given bucket. 
-func (c Client) GetObjectLockConfig(ctx context.Context, bucketName string) (objectLock string, mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { +func (c *Client) GetObjectLockConfig(ctx context.Context, bucketName string) (objectLock string, mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", nil, nil, nil, err @@ -230,12 +230,12 @@ func (c Client) GetObjectLockConfig(ctx context.Context, bucketName string) (obj } // GetBucketObjectLockConfig gets object lock configuration of given bucket. -func (c Client) GetBucketObjectLockConfig(ctx context.Context, bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { +func (c *Client) GetBucketObjectLockConfig(ctx context.Context, bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { _, mode, validity, unit, err = c.GetObjectLockConfig(ctx, bucketName) return mode, validity, unit, err } // SetObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. -func (c Client) SetObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { +func (c *Client) SetObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { return c.SetBucketObjectLockConfig(ctx, bucketName, mode, validity, unit) } diff --git a/vendor/github.com/minio/minio-go/v7/api-object-retention.go b/vendor/github.com/minio/minio-go/v7/api-object-retention.go index 54f2762d..b29cb1f8 100644 --- a/vendor/github.com/minio/minio-go/v7/api-object-retention.go +++ b/vendor/github.com/minio/minio-go/v7/api-object-retention.go @@ -63,7 +63,7 @@ type PutObjectRetentionOptions struct { } // PutObjectRetention sets object retention for a given object and versionID. -func (c Client) PutObjectRetention(ctx context.Context, bucketName, objectName string, opts PutObjectRetentionOptions) error { +func (c *Client) PutObjectRetention(ctx context.Context, bucketName, objectName string, opts PutObjectRetentionOptions) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -126,7 +126,7 @@ func (c Client) PutObjectRetention(ctx context.Context, bucketName, objectName s } // GetObjectRetention gets retention of given object. -func (c Client) GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) { +func (c *Client) GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, nil, err diff --git a/vendor/github.com/minio/minio-go/v7/api-object-tagging.go b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go index 2709efcd..305c36de 100644 --- a/vendor/github.com/minio/minio-go/v7/api-object-tagging.go +++ b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go @@ -36,7 +36,7 @@ type PutObjectTaggingOptions struct { // PutObjectTagging replaces or creates object tag(s) and can target // a specific object version in a versioned bucket. 
-func (c Client) PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags, opts PutObjectTaggingOptions) error { +func (c *Client) PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags, opts PutObjectTaggingOptions) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -87,7 +87,7 @@ type GetObjectTaggingOptions struct { // GetObjectTagging fetches object tag(s) with options to target // a specific object version in a versioned bucket. -func (c Client) GetObjectTagging(ctx context.Context, bucketName, objectName string, opts GetObjectTaggingOptions) (*tags.Tags, error) { +func (c *Client) GetObjectTagging(ctx context.Context, bucketName, objectName string, opts GetObjectTaggingOptions) (*tags.Tags, error) { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) @@ -125,7 +125,7 @@ type RemoveObjectTaggingOptions struct { // RemoveObjectTagging removes object tag(s) with options to control a specific object // version in a versioned bucket -func (c Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName string, opts RemoveObjectTaggingOptions) error { +func (c *Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName string, opts RemoveObjectTaggingOptions) error { // Get resources properly escaped and lined up before // using them in http request. urlValues := make(url.Values) diff --git a/vendor/github.com/minio/minio-go/v7/api-presigned.go b/vendor/github.com/minio/minio-go/v7/api-presigned.go index 80c363da..2e4bacf1 100644 --- a/vendor/github.com/minio/minio-go/v7/api-presigned.go +++ b/vendor/github.com/minio/minio-go/v7/api-presigned.go @@ -30,7 +30,7 @@ import ( // presignURL - Returns a presigned URL for an input 'method'. // Expires maximum is 7days - ie. 604800 and minimum is 1. -func (c Client) presignURL(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { +func (c *Client) presignURL(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) { // Input validation. if method == "" { return nil, errInvalidArgument("method cannot be empty.") @@ -45,11 +45,12 @@ func (c Client) presignURL(ctx context.Context, method string, bucketName string // Convert expires into seconds. expireSeconds := int64(expires / time.Second) reqMetadata := requestMetadata{ - presignURL: true, - bucketName: bucketName, - objectName: objectName, - expires: expireSeconds, - queryValues: reqParams, + presignURL: true, + bucketName: bucketName, + objectName: objectName, + expires: expireSeconds, + queryValues: reqParams, + extraPresignHeader: extraHeaders, } // Instantiate a new request. @@ -65,43 +66,54 @@ func (c Client) presignURL(ctx context.Context, method string, bucketName string // data without credentials. URL can have a maximum expiry of // upto 7days or a minimum of 1sec. Additionally you can override // a set of response headers using the query parameters. 
-func (c Client) PresignedGetObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { +func (c *Client) PresignedGetObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { if err = s3utils.CheckValidObjectName(objectName); err != nil { return nil, err } - return c.presignURL(ctx, http.MethodGet, bucketName, objectName, expires, reqParams) + return c.presignURL(ctx, http.MethodGet, bucketName, objectName, expires, reqParams, nil) } // PresignedHeadObject - Returns a presigned URL to access // object metadata without credentials. URL can have a maximum expiry // of upto 7days or a minimum of 1sec. Additionally you can override // a set of response headers using the query parameters. -func (c Client) PresignedHeadObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { +func (c *Client) PresignedHeadObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { if err = s3utils.CheckValidObjectName(objectName); err != nil { return nil, err } - return c.presignURL(ctx, http.MethodHead, bucketName, objectName, expires, reqParams) + return c.presignURL(ctx, http.MethodHead, bucketName, objectName, expires, reqParams, nil) } // PresignedPutObject - Returns a presigned URL to upload an object // without credentials. URL can have a maximum expiry of upto 7days // or a minimum of 1sec. -func (c Client) PresignedPutObject(ctx context.Context, bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) { +func (c *Client) PresignedPutObject(ctx context.Context, bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) { if err = s3utils.CheckValidObjectName(objectName); err != nil { return nil, err } - return c.presignURL(ctx, http.MethodPut, bucketName, objectName, expires, nil) + return c.presignURL(ctx, http.MethodPut, bucketName, objectName, expires, nil, nil) } -// Presign - returns a presigned URL for any http method of your choice -// along with custom request params. URL can have a maximum expiry of -// upto 7days or a minimum of 1sec. -func (c Client) Presign(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { - return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams) +// PresignHeader - similar to Presign() but allows including HTTP headers that +// will be used to build the signature. The request using the resulting URL will +// need to have the exact same headers to be added for signature validation to +// pass. +// +// FIXME: The extra header parameter should be included in Presign() in the next +// major version bump, and this function should then be deprecated. +func (c *Client) PresignHeader(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) { + return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, extraHeaders) +} + +// Presign - returns a presigned URL for any http method of your choice along +// with custom request params and extra signed headers. URL can have a maximum +// expiry of upto 7days or a minimum of 1sec. 
+func (c *Client) Presign(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { + return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, nil) } // PresignedPostPolicy - Returns POST urlString, form data to upload an object. -func (c Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url.URL, formData map[string]string, err error) { +func (c *Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url.URL, formData map[string]string, err error) { // Validate input arguments. if p.expiration.IsZero() { return nil, nil, errors.New("Expiration time must be specified") diff --git a/vendor/github.com/minio/minio-go/v7/api-put-bucket.go b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go index df9fe98a..1a6db3e1 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-bucket.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go @@ -26,8 +26,8 @@ import ( "github.com/minio/minio-go/v7/pkg/s3utils" ) -/// Bucket operations -func (c Client) makeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { +// Bucket operations +func (c *Client) makeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { // Validate the input arguments. if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil { return err @@ -42,7 +42,7 @@ func (c Client) makeBucket(ctx context.Context, bucketName string, opts MakeBuck return err } -func (c Client) doMakeBucket(ctx context.Context, bucketName string, location string, objectLockEnabled bool) (err error) { +func (c *Client) doMakeBucket(ctx context.Context, bucketName string, location string, objectLockEnabled bool) (err error) { defer func() { // Save the location into cache on a successful makeBucket response. if err == nil { @@ -118,6 +118,6 @@ type MakeBucketOptions struct { // // For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html // For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations -func (c Client) MakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { +func (c *Client) MakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { return c.makeBucket(ctx, bucketName, opts) } diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go index f1653afe..149a536e 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go @@ -26,6 +26,8 @@ import ( "github.com/minio/minio-go/v7/pkg/s3utils" ) +const nullVersionID = "null" + // Verify if reader is *minio.Object func isObject(reader io.Reader) (ok bool) { _, ok = reader.(*Object) @@ -130,7 +132,7 @@ func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCou // getUploadID - fetch upload id if already present for an object name // or initiate a new request to fetch a new upload id. -func (c Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) { +func (c *Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) { // Input validation. 
if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", err diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go index 6c0f20df..4d29dfc1 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go @@ -27,7 +27,7 @@ import ( ) // FPutObject - Create an object in a bucket, with contents from file at filePath. Allows request cancellation. -func (c Client) FPutObject(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (info UploadInfo, err error) { +func (c *Client) FPutObject(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go index 873ec387..333321aa 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go @@ -37,7 +37,7 @@ import ( "github.com/minio/minio-go/v7/pkg/s3utils" ) -func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, +func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) if err != nil { @@ -56,7 +56,7 @@ func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName s return info, err } -func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { +func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err @@ -186,7 +186,7 @@ func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obje } // initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. -func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { +func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { // Input validation. 
if err := s3utils.CheckValidBucketName(bucketName); err != nil { return initiateMultipartUploadResult{}, err @@ -200,8 +200,10 @@ func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectN urlValues.Set("uploads", "") if opts.Internal.SourceVersionID != "" { - if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { - return initiateMultipartUploadResult{}, errInvalidArgument(err.Error()) + if opts.Internal.SourceVersionID != nullVersionID { + if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { + return initiateMultipartUploadResult{}, errInvalidArgument(err.Error()) + } } urlValues.Set("versionId", opts.Internal.SourceVersionID) } @@ -237,7 +239,7 @@ func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectN } // uploadPart - Uploads a part in a multipart upload. -func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader, +func (c *Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide) (ObjectPart, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { @@ -308,7 +310,7 @@ func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID } // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. -func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, +func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, complete completeMultipartUpload, opts PutObjectOptions) (UploadInfo, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go index f1cc9fbb..55cc4f14 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go @@ -41,7 +41,7 @@ import ( // - *minio.Object // - Any reader which has a method 'ReadAt()' // -func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string, +func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 { @@ -90,7 +90,7 @@ type uploadPartReq struct { // temporary files for staging all the data, these temporary files are // cleaned automatically when the caller i.e http client closes the // stream after uploading all the contents successfully. -func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string, +func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string, reader io.ReaderAt, size int64, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. 
if err = s3utils.CheckValidBucketName(bucketName); err != nil { @@ -240,7 +240,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketNa return uploadInfo, nil } -func (c Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string, +func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { @@ -369,7 +369,7 @@ func (c Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bu // putObject special function used Google Cloud Storage. This special function // is used for Google Cloud Storage since Google's multipart API is not S3 compatible. -func (c Client) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { +func (c *Client) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err @@ -430,7 +430,7 @@ func (c Client) putObject(ctx context.Context, bucketName, objectName string, re // putObjectDo - executes the put object http operation. // NOTE: You must have WRITE permissions on a bucket to add an object to it. -func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (UploadInfo, error) { +func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (UploadInfo, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err @@ -452,8 +452,10 @@ func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, contentSHA256Hex: sha256Hex, } if opts.Internal.SourceVersionID != "" { - if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { - return UploadInfo{}, errInvalidArgument(err.Error()) + if opts.Internal.SourceVersionID != nullVersionID { + if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { + return UploadInfo{}, errInvalidArgument(err.Error()) + } } urlValues := make(url.Values) urlValues.Set("versionId", opts.Internal.SourceVersionID) diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go index f669b7d1..fb61b407 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go @@ -221,7 +221,7 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part // - For size input as -1 PutObject does a multipart Put operation // until input stream reaches EOF. Maximum object size that can // be uploaded through this operation will be 5TiB. 
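Editor's note: the doc comment above states that a size of -1 makes PutObject run a multipart upload until the reader reaches EOF (up to 5TiB). A minimal sketch of that streaming path; names are placeholders:

// Sketch: streaming an object of unknown length, per the PutObject comment above.
// Assumes: import "context", "io", "github.com/minio/minio-go/v7".
func streamUpload(c *minio.Client, r io.Reader) (minio.UploadInfo, error) {
	// Passing -1 as the size selects the multipart-until-EOF path.
	return c.PutObject(context.Background(), "my-bucket", "logs/stream.bin", r, -1,
		minio.PutObjectOptions{ContentType: "application/octet-stream"})
}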
-func (c Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, +func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, opts PutObjectOptions) (info UploadInfo, err error) { if objectSize < 0 && opts.DisableMultipart { return UploadInfo{}, errors.New("object size must be provided with disable multipart upload") @@ -235,7 +235,7 @@ func (c Client) PutObject(ctx context.Context, bucketName, objectName string, re return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts) } -func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { +func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { // Check for largest object size allowed. if size > int64(maxMultipartPutObjectSize) { return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) @@ -269,7 +269,7 @@ func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName stri return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts) } -func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { +func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go new file mode 100644 index 00000000..f9feda30 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go @@ -0,0 +1,215 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2021 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "archive/tar" + "bufio" + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "sync" + "time" + + "github.com/klauspost/compress/s2" +) + +// SnowballOptions contains options for PutObjectsSnowball calls. +type SnowballOptions struct { + // Opts is options applied to all objects. + Opts PutObjectOptions + + // Processing options: + + // InMemory specifies that all objects should be collected in memory + // before they are uploaded. + // If false a temporary file will be created. + InMemory bool + + // Compress enabled content compression before upload. + // Compression will typically reduce memory and network usage, + // Compression can safely be enabled with MinIO hosts. 
+ Compress bool +} + +// SnowballObject contains information about a single object to be added to the snowball. +type SnowballObject struct { + // Key is the destination key, including prefix. + Key string + + // Size is the content size of this object. + Size int64 + + // Modtime to apply to the object. + ModTime time.Time + + // Content of the object. + // Exactly 'Size' number of bytes must be provided. + Content io.Reader + + // Close will be called when an object has finished processing. + // Note that if PutObjectsSnowball returns because of an error, + // objects not consumed from the input will NOT have been closed. + // Leave as nil for no callback. + Close func() +} + +type nopReadSeekCloser struct { + io.ReadSeeker +} + +func (n nopReadSeekCloser) Close() error { + return nil +} + +// This is available as io.ReadSeekCloser from go1.16 +type readSeekCloser interface { + io.Reader + io.Closer + io.Seeker +} + +// PutObjectsSnowball will put multiple objects with a single put call. +// A (compressed) TAR file will be created which will contain multiple objects. +// The key for each object will be used for the destination in the specified bucket. +// Total size should be < 5TB. +// This function blocks until 'objs' is closed and the content has been uploaded. +func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts SnowballOptions, objs <-chan SnowballObject) (err error) { + err = opts.Opts.validate() + if err != nil { + return err + } + var tmpWriter io.Writer + var getTmpReader func() (rc readSeekCloser, sz int64, err error) + if opts.InMemory { + b := bytes.NewBuffer(nil) + tmpWriter = b + getTmpReader = func() (readSeekCloser, int64, error) { + return nopReadSeekCloser{bytes.NewReader(b.Bytes())}, int64(b.Len()), nil + } + } else { + f, err := ioutil.TempFile("", "s3-putsnowballobjects-*") + if err != nil { + return err + } + name := f.Name() + tmpWriter = f + var once sync.Once + defer once.Do(func() { + f.Close() + }) + defer os.Remove(name) + getTmpReader = func() (readSeekCloser, int64, error) { + once.Do(func() { + f.Close() + }) + f, err := os.Open(name) + if err != nil { + return nil, 0, err + } + st, err := f.Stat() + if err != nil { + return nil, 0, err + } + return f, st.Size(), nil + } + } + var flush = func() error { return nil } + if !opts.Compress { + if !opts.InMemory { + // Insert buffer for writes. + buf := bufio.NewWriterSize(tmpWriter, 1<<20) + flush = buf.Flush + tmpWriter = buf + } + } else { + s2c := s2.NewWriter(tmpWriter, s2.WriterBetterCompression()) + flush = s2c.Close + defer s2c.Close() + tmpWriter = s2c + } + t := tar.NewWriter(tmpWriter) + +objectLoop: + for { + select { + case <-ctx.Done(): + return ctx.Err() + case obj, ok := <-objs: + if !ok { + break objectLoop + } + + closeObj := func() {} + if obj.Close != nil { + closeObj = obj.Close + } + + // Trim accidental slash prefix. 
+ obj.Key = strings.TrimPrefix(obj.Key, "/") + header := tar.Header{ + Typeflag: tar.TypeReg, + Name: obj.Key, + Size: obj.Size, + ModTime: obj.ModTime, + Format: tar.FormatPAX, + } + if err := t.WriteHeader(&header); err != nil { + closeObj() + return err + } + n, err := io.Copy(t, obj.Content) + if err != nil { + closeObj() + return err + } + if n != obj.Size { + closeObj() + return io.ErrUnexpectedEOF + } + closeObj() + } + } + // Flush tar + err = t.Flush() + if err != nil { + return err + } + // Flush compression + err = flush() + if err != nil { + return err + } + if opts.Opts.UserMetadata == nil { + opts.Opts.UserMetadata = map[string]string{} + } + opts.Opts.UserMetadata["X-Amz-Meta-Snowball-Auto-Extract"] = "true" + opts.Opts.DisableMultipart = true + rc, sz, err := getTmpReader() + if err != nil { + return err + } + defer rc.Close() + rand := c.random.Uint64() + _, err = c.PutObject(ctx, bucketName, fmt.Sprintf("snowball-upload-%x.tar", rand), rc, sz, opts.Opts) + return err +} diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go index 24e4d3f5..fd3f1e12 100644 --- a/vendor/github.com/minio/minio-go/v7/api-remove.go +++ b/vendor/github.com/minio/minio-go/v7/api-remove.go @@ -29,9 +29,16 @@ import ( "github.com/minio/minio-go/v7/pkg/s3utils" ) -// BucketOptions special headers to purge buckets, only +//revive:disable + +// Deprecated: BucketOptions will be renamed to RemoveBucketOptions in future versions. +type BucketOptions = RemoveBucketOptions + +//revive:enable + +// RemoveBucketOptions special headers to purge buckets, only // useful when endpoint is MinIO -type BucketOptions struct { +type RemoveBucketOptions struct { ForceDelete bool } @@ -40,7 +47,7 @@ type BucketOptions struct { // All objects (including all object versions and delete markers) // in the bucket will be deleted forcibly if bucket options set // ForceDelete to 'true'. -func (c Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, opts BucketOptions) error { +func (c *Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, opts RemoveBucketOptions) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -77,7 +84,7 @@ func (c Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, // // All objects (including all object versions and delete markers). // in the bucket must be deleted before successfully attempting this request. -func (c Client) RemoveBucket(ctx context.Context, bucketName string) error { +func (c *Client) RemoveBucket(ctx context.Context, bucketName string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -120,7 +127,7 @@ type RemoveObjectOptions struct { } // RemoveObject removes an object from a bucket. -func (c Client) RemoveObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error { +func (c *Client) RemoveObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error { // Input validation. 
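Editor's note: the new api-putobject-snowball.go above adds PutObjectsSnowball, which tars many small objects into one upload that the server auto-extracts. A usage sketch, assuming the target bucket already exists; keys and contents are placeholders:

// Sketch: batching small objects through the PutObjectsSnowball API added above.
// Assumes: import "bytes", "context", "time", "github.com/minio/minio-go/v7".
func snowballUpload(c *minio.Client) error {
	objs := make(chan minio.SnowballObject, 2)
	go func() {
		defer close(objs)
		for _, name := range []string{"a.txt", "b.txt"} {
			body := []byte("hello " + name)
			objs <- minio.SnowballObject{
				Key:     "batch/" + name,  // destination key, including prefix
				Size:    int64(len(body)), // exactly Size bytes must be readable
				ModTime: time.Now(),
				Content: bytes.NewReader(body),
			}
		}
	}()
	opts := minio.SnowballOptions{
		Opts:     minio.PutObjectOptions{},
		InMemory: true, // stage the archive in memory instead of a temp file
		Compress: true, // safe with MinIO hosts, per the option's comment above
	}
	// Blocks until objs is closed and the archive has been uploaded.
	return c.PutObjectsSnowball(context.Background(), "my-bucket", opts, objs)
}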
if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -132,7 +139,7 @@ func (c Client) RemoveObject(ctx context.Context, bucketName, objectName string, return c.removeObject(ctx, bucketName, objectName, opts) } -func (c Client) removeObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error { +func (c *Client) removeObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error { // Get resources properly escaped and lined up before // using them in http request. @@ -246,7 +253,7 @@ type RemoveObjectsOptions struct { // RemoveObjects removes multiple objects from a bucket while // it is possible to specify objects versions which are received from // objectsCh. Remove failures are sent back via error channel. -func (c Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectError { +func (c *Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectError { errorCh := make(chan RemoveObjectError, 1) // Validate if bucket name is valid. @@ -291,7 +298,7 @@ func hasInvalidXMLChar(str string) bool { } // Generate and call MultiDelete S3 requests based on entries received from objectsCh -func (c Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, errorCh chan<- RemoveObjectError, opts RemoveObjectsOptions) { +func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, errorCh chan<- RemoveObjectError, opts RemoveObjectsOptions) { maxEntries := 1000 finish := false urlValues := make(url.Values) @@ -389,7 +396,7 @@ func (c Client) removeObjects(ctx context.Context, bucketName string, objectsCh } // RemoveIncompleteUpload aborts an partially uploaded object. -func (c Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error { +func (c *Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -416,7 +423,7 @@ func (c Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectNa // abortMultipartUpload aborts a multipart upload for the given // uploadID, all previously uploaded parts are deleted. -func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error { +func (c *Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error { // Input validation. 
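Editor's note: the api-remove.go hunk above renames BucketOptions to RemoveBucketOptions (keeping a deprecated alias) and moves the removal APIs to pointer receivers. A sketch of draining RemoveObjects failures and then force-deleting the bucket; the bucket name is a placeholder:

// Sketch: bulk delete plus forced bucket removal, matching the signatures above.
// Assumes: import "context", "github.com/minio/minio-go/v7".
func removeAll(c *minio.Client, bucket string) error {
	ctx := context.Background()

	objectsCh := make(chan minio.ObjectInfo)
	go func() {
		defer close(objectsCh)
		for obj := range c.ListObjects(ctx, bucket, minio.ListObjectsOptions{Recursive: true}) {
			if obj.Err == nil {
				objectsCh <- obj
			}
		}
	}()
	// Remove failures come back on the returned error channel.
	for rErr := range c.RemoveObjects(ctx, bucket, objectsCh, minio.RemoveObjectsOptions{}) {
		if rErr.Err != nil {
			return rErr.Err
		}
	}
	// ForceDelete purges remaining versions/delete markers on MinIO endpoints.
	return c.RemoveBucketWithOptions(ctx, bucket, minio.RemoveBucketOptions{ForceDelete: true})
}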
if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err diff --git a/vendor/github.com/minio/minio-go/v7/api-restore.go b/vendor/github.com/minio/minio-go/v7/api-restore.go index dd7ce7a3..69eefec0 100644 --- a/vendor/github.com/minio/minio-go/v7/api-restore.go +++ b/vendor/github.com/minio/minio-go/v7/api-restore.go @@ -68,7 +68,7 @@ type MetadataEntry struct { // S3 holds properties of the copy of the archived object type S3 struct { - AccessControlList *AccessControlList `xml:"AccessControlList,omiempty"` + AccessControlList *AccessControlList `xml:"AccessControlList,omitempty"` BucketName string Prefix string CannedACL *string `xml:"CannedACL,omitempty"` @@ -110,7 +110,7 @@ func (r *RestoreRequest) SetDays(v int) { r.Days = &v } -// SetDays sets the GlacierJobParameters of the restore request +// SetGlacierJobParameters sets the GlacierJobParameters of the restore request func (r *RestoreRequest) SetGlacierJobParameters(v GlacierJobParameters) { r.GlacierJobParameters = &v } @@ -141,7 +141,7 @@ func (r *RestoreRequest) SetOutputLocation(v OutputLocation) { } // RestoreObject is a implementation of https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html AWS S3 API -func (c Client) RestoreObject(ctx context.Context, bucketName, objectName, versionID string, req RestoreRequest) error { +func (c *Client) RestoreObject(ctx context.Context, bucketName, objectName, versionID string, req RestoreRequest) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go index 37ed97b7..948f8a74 100644 --- a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go +++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go @@ -121,8 +121,8 @@ func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement return err } - switch se := t.(type) { - case xml.StartElement: + se, ok := t.(xml.StartElement) + if ok { tagName := se.Name.Local switch tagName { case "Name", "Prefix", diff --git a/vendor/github.com/minio/minio-go/v7/api-select.go b/vendor/github.com/minio/minio-go/v7/api-select.go index c5e6d309..74c1df5f 100644 --- a/vendor/github.com/minio/minio-go/v7/api-select.go +++ b/vendor/github.com/minio/minio-go/v7/api-select.go @@ -438,7 +438,7 @@ const ( ) // SelectObjectContent is a implementation of http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html AWS S3 API. -func (c Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) { +func (c *Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err diff --git a/vendor/github.com/minio/minio-go/v7/api-stat.go b/vendor/github.com/minio/minio-go/v7/api-stat.go index 12a1bf93..6deb5f5d 100644 --- a/vendor/github.com/minio/minio-go/v7/api-stat.go +++ b/vendor/github.com/minio/minio-go/v7/api-stat.go @@ -27,7 +27,7 @@ import ( // BucketExists verifies if bucket exists and you have permission to access it. Allows for a Context to // control cancellations and timeouts. -func (c Client) BucketExists(ctx context.Context, bucketName string) (bool, error) { +func (c *Client) BucketExists(ctx context.Context, bucketName string) (bool, error) { // Input validation. 
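Editor's note: the api-restore.go hunk above fixes the AccessControlList XML tag ("omiempty" to "omitempty") and the SetGlacierJobParameters doc comment; RestoreObject itself only gains a pointer receiver. A restore sketch using just the setters shown above; names are placeholders and the empty versionID targets the latest version:

// Sketch: requesting a temporary restore of an archived object for three days.
// Assumes: import "context", "github.com/minio/minio-go/v7".
func restoreArchived(c *minio.Client, bucket, object string) error {
	req := minio.RestoreRequest{}
	req.SetDays(3) // keep the restored copy available for three days

	return c.RestoreObject(context.Background(), bucket, object, "", req)
}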
if err := s3utils.CheckValidBucketName(bucketName); err != nil { return false, err @@ -58,19 +58,7 @@ func (c Client) BucketExists(ctx context.Context, bucketName string) (bool, erro } // StatObject verifies if object exists and you have permission to access. -func (c Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ObjectInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return ObjectInfo{}, err - } - return c.statObject(ctx, bucketName, objectName, opts) -} - -// Lower level API for statObject supporting pre-conditions and range headers. -func (c Client) statObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { +func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectInfo{}, err diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index c8550ef1..b5eaa690 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -46,7 +46,7 @@ import ( // Client implements Amazon S3 compatible methods. type Client struct { - /// Standard options. + // Standard options. // Parsed endpoint url provided by the user. endpointURL *url.URL @@ -92,9 +92,7 @@ type Client struct { md5Hasher func() md5simd.Hasher sha256Hasher func() md5simd.Hasher - healthCheckCh chan struct{} - healthCheck int32 - lastOnline time.Time + healthStatus int32 } // Options for New method @@ -113,7 +111,7 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.14" + libraryVersion = "v7.0.16" ) // User Agent should always following the below style. @@ -312,7 +310,7 @@ func privateNew(endpoint string, opts *Options) (*Client, error) { clnt.lookup = opts.BucketLookup // healthcheck is not initialized - clnt.healthCheck = unknown + clnt.healthStatus = unknown // Return. return clnt, nil @@ -404,30 +402,30 @@ const ( // IsOnline returns true if healthcheck enabled and client is online func (c *Client) IsOnline() bool { - switch atomic.LoadInt32(&c.healthCheck) { - case online, unknown: - return true - } - return false + return !c.IsOffline() +} + +// sets online healthStatus to offline +func (c *Client) markOffline() { + atomic.CompareAndSwapInt32(&c.healthStatus, online, offline) } // IsOffline returns true if healthcheck enabled and client is offline func (c *Client) IsOffline() bool { - return !c.IsOnline() + return atomic.LoadInt32(&c.healthStatus) == offline } // HealthCheck starts a healthcheck to see if endpoint is up. 
Returns a context cancellation function // and and error if health check is already started func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, error) { - if atomic.LoadInt32(&c.healthCheck) == online { - return nil, fmt.Errorf("health check running already") + if atomic.LoadInt32(&c.healthStatus) == online { + return nil, fmt.Errorf("health check is running") } if hcDuration < 1*time.Second { return nil, fmt.Errorf("health check duration should be atleast 1 second") } ctx, cancelFn := context.WithCancel(context.Background()) - c.healthCheckCh = make(chan struct{}) - atomic.StoreInt32(&c.healthCheck, online) + atomic.StoreInt32(&c.healthStatus, online) probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-health-") go func(duration time.Duration) { timer := time.NewTimer(duration) @@ -435,27 +433,24 @@ func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, erro for { select { case <-ctx.Done(): - close(c.healthCheckCh) - atomic.StoreInt32(&c.healthCheck, unknown) + atomic.StoreInt32(&c.healthStatus, unknown) return case <-timer.C: - timer.Reset(duration) // Do health check the first time and ONLY if the connection is marked offline - if c.IsOffline() || c.lastOnline.IsZero() { - _, err := c.getBucketLocation(context.Background(), probeBucketName) - if err != nil && IsNetworkOrHostDown(err, false) { - atomic.StoreInt32(&c.healthCheck, offline) + if c.IsOffline() { + gctx, gcancel := context.WithTimeout(context.Background(), 3*time.Second) + _, err := c.getBucketLocation(gctx, probeBucketName) + gcancel() + if IsNetworkOrHostDown(err, false) { + // Still network errors do not need to do anything. + continue } switch ToErrorResponse(err).Code { case "NoSuchBucket", "AccessDenied", "": - c.lastOnline = time.Now() - atomic.StoreInt32(&c.healthCheck, online) + atomic.CompareAndSwapInt32(&c.healthStatus, offline, online) } } - case <-c.healthCheckCh: - // set offline if client saw a network error - atomic.StoreInt32(&c.healthCheck, offline) } } }(hcDuration) @@ -468,11 +463,12 @@ type requestMetadata struct { presignURL bool // User supplied. - bucketName string - objectName string - queryValues url.Values - customHeader http.Header - expires int64 + bucketName string + objectName string + queryValues url.Values + customHeader http.Header + extraPresignHeader http.Header + expires int64 // Generated by our internal code. bucketLocation string @@ -483,7 +479,7 @@ type requestMetadata struct { } // dumpHTTP - dump HTTP request and response. -func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { +func (c *Client) dumpHTTP(req *http.Request, resp *http.Response) error { // Starts http dump. _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------") if err != nil { @@ -543,8 +539,14 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { } // do - execute http request. -func (c Client) do(req *http.Request) (*http.Response, error) { - resp, err := c.httpClient.Do(req) +func (c *Client) do(req *http.Request) (resp *http.Response, err error) { + defer func() { + if IsNetworkOrHostDown(err, false) { + c.markOffline() + } + }() + + resp, err = c.httpClient.Do(req) if err != nil { // Handle this specifically for now until future Golang versions fix this issue properly. 
if urlErr, ok := err.(*url.Error); ok { @@ -587,7 +589,11 @@ var successStatus = []int{ // executeMethod - instantiates a given method, and retries the // request upon any error up to maxRetries attempts in a binomially // delayed manner using a standard back off algorithm. -func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) { +func (c *Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) { + if c.IsOffline() { + return nil, errors.New(c.endpointURL.String() + " is offline.") + } + var retryable bool // Indicates if request can be retried. var bodySeeker io.Seeker // Extracted seeker from io.Reader. var reqRetry = MaxRetry // Indicates how many times we can retry the request @@ -641,24 +647,11 @@ func (c Client) executeMethod(ctx context.Context, method string, metadata reque continue // Retry. } - if atomic.LoadInt32(&c.healthCheck) != unknown && IsNetworkOrHostDown(err, false) { - select { - case c.healthCheckCh <- struct{}{}: - default: - } - } return nil, err } // Initiate the request. res, err = c.do(req) if err != nil { - if atomic.LoadInt32(&c.healthCheck) != unknown && IsNetworkOrHostDown(err, false) { - select { - case c.healthCheckCh <- struct{}{}: - default: - } - } - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return nil, err } @@ -753,7 +746,7 @@ func (c Client) executeMethod(ctx context.Context, method string, metadata reque } // newRequest - instantiate a new HTTP request for a given method. -func (c Client) newRequest(ctx context.Context, method string, metadata requestMetadata) (req *http.Request, err error) { +func (c *Client) newRequest(ctx context.Context, method string, metadata requestMetadata) (req *http.Request, err error) { // If no method is supplied default to 'POST'. if method == "" { method = http.MethodPost @@ -821,6 +814,14 @@ func (c Client) newRequest(ctx context.Context, method string, metadata requestM if signerType.IsAnonymous() { return nil, errInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.") } + if metadata.extraPresignHeader != nil { + if signerType.IsV2() { + return nil, errInvalidArgument("Extra signed headers for Presign with Signature V2 is not supported.") + } + for k, v := range metadata.extraPresignHeader { + req.Header.Set(k, v[0]) + } + } if signerType.IsV2() { // Presign URL with signature v2. req = signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost) @@ -893,7 +894,7 @@ func (c Client) newRequest(ctx context.Context, method string, metadata requestM } // set User agent. -func (c Client) setUserAgent(req *http.Request) { +func (c *Client) setUserAgent(req *http.Request) { req.Header.Set("User-Agent", libraryUserAgent) if c.appInfo.appName != "" && c.appInfo.appVersion != "" { req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) @@ -901,7 +902,7 @@ func (c Client) setUserAgent(req *http.Request) { } // makeTargetURL make a new target url. -func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) { +func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) { host := c.endpointURL.Host // For Amazon S3 endpoint, try to fetch location based endpoint. 
if s3utils.IsAmazonEndpoint(*c.endpointURL) { @@ -946,13 +947,13 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isV if isVirtualHostStyle { urlStr = scheme + "://" + bucketName + "." + host + "/" if objectName != "" { - urlStr = urlStr + s3utils.EncodePath(objectName) + urlStr += s3utils.EncodePath(objectName) } } else { // If not fall back to using path style. urlStr = urlStr + bucketName + "/" if objectName != "" { - urlStr = urlStr + s3utils.EncodePath(objectName) + urlStr += s3utils.EncodePath(objectName) } } } diff --git a/vendor/github.com/minio/minio-go/v7/bucket-cache.go b/vendor/github.com/minio/minio-go/v7/bucket-cache.go index 156150f6..19dabd25 100644 --- a/vendor/github.com/minio/minio-go/v7/bucket-cache.go +++ b/vendor/github.com/minio/minio-go/v7/bucket-cache.go @@ -73,7 +73,7 @@ func (r *bucketLocationCache) Delete(bucketName string) { // GetBucketLocation - get location for the bucket name from location cache, if not // fetch freshly by making a new request. -func (c Client) GetBucketLocation(ctx context.Context, bucketName string) (string, error) { +func (c *Client) GetBucketLocation(ctx context.Context, bucketName string) (string, error) { if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", err } @@ -82,7 +82,7 @@ func (c Client) GetBucketLocation(ctx context.Context, bucketName string) (strin // getBucketLocation - Get location for the bucketName from location map cache, if not // fetch freshly by making a new request. -func (c Client) getBucketLocation(ctx context.Context, bucketName string) (string, error) { +func (c *Client) getBucketLocation(ctx context.Context, bucketName string) (string, error) { if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", err } @@ -169,7 +169,7 @@ func processBucketLocationResponse(resp *http.Response, bucketName string) (buck } // getBucketLocationRequest - Wrapper creates a new getBucketLocation request. -func (c Client) getBucketLocationRequest(ctx context.Context, bucketName string) (*http.Request, error) { +func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string) (*http.Request, error) { // Set location query. urlValues := make(url.Values) urlValues.Set("location", "") @@ -188,7 +188,7 @@ func (c Client) getBucketLocationRequest(ctx context.Context, bucketName string) var urlStr string - //only support Aliyun OSS for virtual hosted path, compatible Amazon & Google Endpoint + // only support Aliyun OSS for virtual hosted path, compatible Amazon & Google Endpoint if isVirtualHost && s3utils.IsAliyunOSSEndpoint(targetURL) { urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + targetURL.Host + "/?location" } else { diff --git a/vendor/github.com/minio/minio-go/v7/constants.go b/vendor/github.com/minio/minio-go/v7/constants.go index 7caa42d9..dee83b87 100644 --- a/vendor/github.com/minio/minio-go/v7/constants.go +++ b/vendor/github.com/minio/minio-go/v7/constants.go @@ -17,7 +17,7 @@ package minio -/// Multipart upload defaults. +// Multipart upload defaults. // absMinPartSize - absolute minimum part size (5 MiB) below which // a part in a multipart upload may not be uploaded. 
diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go index 7bef7497..fdba5274 100644 --- a/vendor/github.com/minio/minio-go/v7/core.go +++ b/vendor/github.com/minio/minio-go/v7/core.go @@ -125,9 +125,3 @@ func (c Core) PutBucketPolicy(ctx context.Context, bucket, bucketPolicy string) func (c Core) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { return c.getObject(ctx, bucketName, objectName, opts) } - -// StatObject is a lower level API implemented to support special -// conditions matching etag, modtime on a request. -func (c Core) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { - return c.statObject(ctx, bucketName, objectName, opts) -} diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go index 7a168993..b8950dd2 100644 --- a/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -20,6 +20,7 @@ package main import ( + "archive/zip" "bytes" "context" "errors" @@ -32,6 +33,7 @@ import ( "net/http" "net/url" "os" + "path" "path/filepath" "reflect" "runtime" @@ -151,6 +153,8 @@ func logError(testName string, function string, args map[string]interface{}, sta // addition to NotImplemented error returned from server if isErrNotImplemented(err) { ignoredLog(testName, function, args, startTime, message).Info() + } else if isRunOnFail() { + failureLog(testName, function, args, startTime, alert, message, err).Error() } else { failureLog(testName, function, args, startTime, alert, message, err).Fatal() } @@ -260,6 +264,10 @@ func isErrNotImplemented(err error) bool { return minio.ToErrorResponse(err).Code == "NotImplemented" } +func isRunOnFail() bool { + return os.Getenv("RUN_ON_FAIL") == "1" +} + func init() { // If server endpoint is not set, all tests default to // using https://play.min.io @@ -2885,8 +2893,8 @@ func testFPutObject() { logError(testName, function, args, startTime, "", "StatObject failed", err) return } - if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-gtar or application/octet-stream, got "+rGTar.ContentType, err) + if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" { + logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-tar or application/octet-stream, got "+rGTar.ContentType, err) return } @@ -3174,6 +3182,189 @@ func testPutObjectContext() { } +// Tests get object with s3zip extensions. +func testGetObjectS3Zip() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{"x-minio-extract": true} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + ".zip" + args["objectName"] = objectName + + var zipFile bytes.Buffer + zw := zip.NewWriter(&zipFile) + rng := rand.New(rand.NewSource(0xc0cac01a)) + const nFiles = 500 + for i := 0; i <= nFiles; i++ { + if i == nFiles { + // Make one large, compressible file. + i = 1000000 + } + b := make([]byte, i) + if i < nFiles { + rng.Read(b) + } + wc, err := zw.Create(fmt.Sprintf("test/small/file-%d.bin", i)) + if err != nil { + logError(testName, function, args, startTime, "", "zw.Create failed", err) + return + } + wc.Write(b) + } + err = zw.Close() + if err != nil { + logError(testName, function, args, startTime, "", "zw.Close failed", err) + return + } + buf := zipFile.Bytes() + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat object failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(len(buf))+", got "+string(st.Size), err) + return + } + r.Close() + + zr, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf))) + if err != nil { + logError(testName, function, args, startTime, "", "zip.NewReader failed", err) + return + } + lOpts := minio.ListObjectsOptions{} + lOpts.Set("x-minio-extract", "true") + lOpts.Prefix = objectName + "/" + lOpts.Recursive = true + list := c.ListObjects(context.Background(), bucketName, lOpts) + var listed = map[string]minio.ObjectInfo{} + for item := range list { + if item.Err != nil { + break + } + listed[item.Key] = item + } + if len(listed) == 0 { + // Assume we are running against non-minio. 
+ args["SKIPPED"] = true + ignoredLog(testName, function, args, startTime, "s3zip does not appear to be present").Info() + return + } + + for _, file := range zr.File { + if file.FileInfo().IsDir() { + continue + } + args["zipfile"] = file.Name + zfr, err := file.Open() + if err != nil { + logError(testName, function, args, startTime, "", "file.Open failed", err) + return + } + want, err := ioutil.ReadAll(zfr) + if err != nil { + logError(testName, function, args, startTime, "", "fzip file read failed", err) + return + } + + opts := minio.GetObjectOptions{} + opts.Set("x-minio-extract", "true") + key := path.Join(objectName, file.Name) + r, err = c.GetObject(context.Background(), bucketName, key, opts) + if err != nil { + terr := minio.ToErrorResponse(err) + if terr.StatusCode != http.StatusNotFound { + logError(testName, function, args, startTime, "", "GetObject failed", err) + } + return + } + got, err := ioutil.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + r.Close() + if !bytes.Equal(want, got) { + logError(testName, function, args, startTime, "", "Content mismatch", err) + return + } + oi, ok := listed[key] + if !ok { + logError(testName, function, args, startTime, "", "Object Missing", fmt.Errorf("%s not present in listing", key)) + return + } + if int(oi.Size) != len(got) { + logError(testName, function, args, startTime, "", "Object Size Incorrect", fmt.Errorf("listing %d, read %d", oi.Size, len(got))) + return + } + delete(listed, key) + } + delete(args, "zipfile") + if len(listed) > 0 { + logError(testName, function, args, startTime, "", "Extra listed objects", fmt.Errorf("left over: %v", listed)) + return + } + successLogger(testName, function, args, startTime).Info() +} + // Tests get object ReaderSeeker interface methods. 
func testGetObjectReadSeekFunctional() { // initialize logging params @@ -5902,6 +6093,63 @@ func testFunctional() { return } + function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)" + functionAll += ", " + function + presignExtraHeaders := map[string][]string{ + "mysecret": {"abcxxx"}, + } + args = map[string]interface{}{ + "method": "PUT", + "bucketName": bucketName, + "objectName": objectName + "-presign-custom", + "expires": 3600 * time.Second, + "extraHeaders": presignExtraHeaders, + } + presignedURL, err := c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders) + if err != nil { + logError(testName, function, args, startTime, "", "Presigned failed", err) + return + } + + // Generate data more than 32K + buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) + + req, err = http.NewRequest(http.MethodPut, presignedURL.String(), bytes.NewReader(buf)) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err) + return + } + + req.Header.Add("mysecret", "abcxxx") + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err) + return + } + + // Download the uploaded object to verify + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-presign-custom", + } + newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presign-custom", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of uploaded custom-presigned object failed", err) + return + } + + newReadBytes, err = ioutil.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed during get on custom-presigned put object", err) + return + } + newReader.Close() + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch on custom-presigned object upload verification", err) + return + } + function = "RemoveObject(bucketName, objectName)" functionAll += ", " + function args = map[string]interface{}{ @@ -5938,6 +6186,14 @@ func testFunctional() { return } + args["objectName"] = objectName + "-presign-custom" + err = c.RemoveObject(context.Background(), bucketName, objectName+"-presign-custom", minio.RemoveObjectOptions{}) + + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + function = "RemoveBucket(bucketName)" functionAll += ", " + function args = map[string]interface{}{ @@ -6476,8 +6732,8 @@ func testFPutObjectV2() { logError(testName, function, args, startTime, "", "Unexpected size", nil) return } - if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err) + if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" { + logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-tar , got "+rGTar.ContentType, err) return } @@ -10680,27 +10936,44 @@ func testFunctionalV2() { return } - function = "GetObject(bucketName, objectName)" - functionAll += ", " + function + // Download the uploaded object to verify args 
= map[string]interface{}{ "bucketName": bucketName, "objectName": objectName + "-presigned", } newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{}) if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) + logError(testName, function, args, startTime, "", "GetObject of uploaded presigned object failed", err) return } newReadBytes, err = ioutil.ReadAll(newReader) if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) + logError(testName, function, args, startTime, "", "ReadAll failed during get on presigned put object", err) return } newReader.Close() if !bytes.Equal(newReadBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch", err) + logError(testName, function, args, startTime, "", "Bytes mismatch on presigned object upload verification", err) + return + } + + function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)" + functionAll += ", " + function + presignExtraHeaders := map[string][]string{ + "mysecret": {"abcxxx"}, + } + args = map[string]interface{}{ + "method": "PUT", + "bucketName": bucketName, + "objectName": objectName + "-presign-custom", + "expires": 3600 * time.Second, + "extraHeaders": presignExtraHeaders, + } + _, err = c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders) + if err == nil { + logError(testName, function, args, startTime, "", "Presigned with extra headers succeeded", err) return } @@ -11596,6 +11869,7 @@ func testRemoveObjects() { _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "Error uploading object", err) + return } // Replace with smaller... @@ -11617,7 +11891,8 @@ func testRemoveObjects() { } err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts) if err != nil { - log.Fatalln(err) + logError(testName, function, args, startTime, "", "Error setting retention", err) + return } objectsCh := make(chan minio.ObjectInfo) @@ -11627,7 +11902,8 @@ func testRemoveObjects() { // List all objects from a bucket-name with a matching prefix. for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) { if object.Err != nil { - log.Fatalln(object.Err) + logError(testName, function, args, startTime, "", "Error listing objects", object.Err) + return } objectsCh <- object } @@ -11650,7 +11926,8 @@ func testRemoveObjects() { // List all objects from a bucket-name with a matching prefix. 
for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) { if object.Err != nil { - log.Fatalln(object.Err) + logError(testName, function, args, startTime, "", "Error listing objects", object.Err) + return } objectsCh1 <- object } @@ -11730,6 +12007,7 @@ func main() { testPutObjectStreaming() testGetObjectSeekEnd() testGetObjectClosedTwice() + testGetObjectS3Zip() testRemoveMultipleObjects() testFPutObjectMultipart() testFPutObject() diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go index bbd25ed8..485a717e 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go @@ -112,7 +112,7 @@ func (m *IAM) Retrieve() (Value, error) { return &WebIdentityToken{Token: string(token)}, nil }, - roleARN: os.Getenv("AWS_ROLE_ARN"), + RoleARN: os.Getenv("AWS_ROLE_ARN"), roleSessionName: os.Getenv("AWS_ROLE_SESSION_NAME"), } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts-tls-identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts-tls-identity.go new file mode 100644 index 00000000..2e37025a --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts-tls-identity.go @@ -0,0 +1,192 @@ +// MinIO Go Library for Amazon S3 Compatible Cloud Storage +// Copyright 2021 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credentials + +import ( + "crypto/tls" + "encoding/xml" + "errors" + "io" + "net" + "net/http" + "net/url" + "strconv" + "time" +) + +// CertificateIdentityOption is an optional AssumeRoleWithCertificate +// parameter - e.g. a custom HTTP transport configuration or S3 credental +// livetime. +type CertificateIdentityOption func(*STSCertificateIdentity) + +// CertificateIdentityWithTransport returns a CertificateIdentityOption that +// customizes the STSCertificateIdentity with the given http.RoundTripper. +func CertificateIdentityWithTransport(t http.RoundTripper) CertificateIdentityOption { + return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.Client.Transport = t }) +} + +// CertificateIdentityWithExpiry returns a CertificateIdentityOption that +// customizes the STSCertificateIdentity with the given livetime. +// +// Fetched S3 credentials will have the given livetime if the STS server +// allows such credentials. +func CertificateIdentityWithExpiry(livetime time.Duration) CertificateIdentityOption { + return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.S3CredentialLivetime = livetime }) +} + +// A STSCertificateIdentity retrieves S3 credentials from the MinIO STS API and +// rotates those credentials once they expire. +type STSCertificateIdentity struct { + Expiry + + // STSEndpoint is the base URL endpoint of the STS API. + // For example, https://minio.local:9000 + STSEndpoint string + + // S3CredentialLivetime is the duration temp. 
S3 access + // credentials should be valid. + // + // It represents the access credential livetime requested + // by the client. The STS server may choose to issue + // temp. S3 credentials that have a different - usually + // shorter - livetime. + // + // The default livetime is one hour. + S3CredentialLivetime time.Duration + + // Client is the HTTP client used to authenticate and fetch + // S3 credentials. + // + // A custom TLS client configuration can be specified by + // using a custom http.Transport: + // Client: http.Client { + // Transport: &http.Transport{ + // TLSClientConfig: &tls.Config{}, + // }, + // } + Client http.Client +} + +var _ Provider = (*STSWebIdentity)(nil) // compiler check + +// NewSTSCertificateIdentity returns a STSCertificateIdentity that authenticates +// to the given STS endpoint with the given TLS certificate and retrieves and +// rotates S3 credentials. +func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, options ...CertificateIdentityOption) (*Credentials, error) { + if endpoint == "" { + return nil, errors.New("STS endpoint cannot be empty") + } + if _, err := url.Parse(endpoint); err != nil { + return nil, err + } + var identity = &STSCertificateIdentity{ + STSEndpoint: endpoint, + Client: http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 5 * time.Second, + TLSClientConfig: &tls.Config{ + Certificates: []tls.Certificate{certificate}, + }, + }, + }, + } + for _, option := range options { + option(identity) + } + return New(identity), nil +} + +// Retrieve fetches a new set of S3 credentials from the configured +// STS API endpoint. +func (i *STSCertificateIdentity) Retrieve() (Value, error) { + endpointURL, err := url.Parse(i.STSEndpoint) + if err != nil { + return Value{}, err + } + var livetime = i.S3CredentialLivetime + if livetime == 0 { + livetime = 1 * time.Hour + } + + queryValues := url.Values{} + queryValues.Set("Action", "AssumeRoleWithCertificate") + queryValues.Set("Version", STSVersion) + endpointURL.RawQuery = queryValues.Encode() + + req, err := http.NewRequest(http.MethodPost, endpointURL.String(), nil) + if err != nil { + return Value{}, err + } + req.Form.Add("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10)) + + resp, err := i.Client.Do(req) + if err != nil { + return Value{}, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + if resp.StatusCode != http.StatusOK { + return Value{}, errors.New(resp.Status) + } + + const MaxSize = 10 * 1 << 20 + var body io.Reader = resp.Body + if resp.ContentLength > 0 && resp.ContentLength < MaxSize { + body = io.LimitReader(body, resp.ContentLength) + } else { + body = io.LimitReader(body, MaxSize) + } + + var response assumeRoleWithCertificateResponse + if err = xml.NewDecoder(body).Decode(&response); err != nil { + return Value{}, err + } + i.SetExpiration(response.Result.Credentials.Expiration, DefaultExpiryWindow) + return Value{ + AccessKeyID: response.Result.Credentials.AccessKey, + SecretAccessKey: response.Result.Credentials.SecretKey, + SessionToken: response.Result.Credentials.SessionToken, + SignerType: SignatureDefault, + }, nil +} + +// Expiration returns the expiration time of the current S3 credentials. 
+func (i *STSCertificateIdentity) Expiration() time.Time { return i.expiration } + +type assumeRoleWithCertificateResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCertificateResponse" json:"-"` + Result struct { + Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + } `xml:"Credentials" json:"credentials,omitempty"` + } `xml:"AssumeRoleWithCertificateResult"` + ResponseMetadata struct { + RequestID string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go index 0fa5b55f..bdde1fa3 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go @@ -124,7 +124,7 @@ func stripPassword(err error) error { // LDAP Identity with a specified session policy. The `policy` parameter must be // a JSON string specifying the policy document. // -// DEPRECATED: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead. +// Deprecated: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead. func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, policy string) (*Credentials, error) { return New(&LDAPIdentity{ Client: &http.Client{Transport: http.DefaultTransport}, diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go index c1109140..25ca751d 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go @@ -78,9 +78,9 @@ type STSWebIdentity struct { // This is a customer provided function and is mandatory. GetWebIDTokenExpiry func() (*WebIdentityToken, error) - // roleARN is the Amazon Resource Name (ARN) of the role that the caller is + // RoleARN is the Amazon Resource Name (ARN) of the role that the caller is // assuming. - roleARN string + RoleARN string // roleSessionName is the identifier for the assumed role session. roleSessionName string @@ -164,7 +164,7 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession // Retrieve retrieves credentials from the MinIO service. // Error will be returned if the request fails. 
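Editor's note: sts-tls-identity.go above adds an AssumeRoleWithCertificate credential provider, and the web-identity provider now exports RoleARN. A sketch of building a client from a TLS client certificate; the endpoint and key paths are placeholders:

// Sketch: mTLS-based credentials via the STSCertificateIdentity provider added above.
// Assumes: import "crypto/tls", "time", "github.com/minio/minio-go/v7",
//          "github.com/minio/minio-go/v7/pkg/credentials".
func newCertClient() (*minio.Client, error) {
	cert, err := tls.LoadX509KeyPair("client.crt", "client.key") // placeholder paths
	if err != nil {
		return nil, err
	}
	creds, err := credentials.NewSTSCertificateIdentity(
		"https://minio.local:9000", cert,
		credentials.CertificateIdentityWithExpiry(30*time.Minute), // requested credential lifetime
	)
	if err != nil {
		return nil, err
	}
	return minio.New("minio.local:9000", &minio.Options{Creds: creds, Secure: true})
}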
func (m *STSWebIdentity) Retrieve() (Value, error) { - a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.roleARN, m.roleSessionName, m.GetWebIDTokenExpiry) + a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.GetWebIDTokenExpiry) if err != nil { return Value{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go index 83870a36..96f1101c 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go @@ -21,9 +21,12 @@ package lifecycle import ( "encoding/json" "encoding/xml" + "errors" "time" ) +var errMissingStorageClass = errors.New("storage-class cannot be empty") + // AbortIncompleteMultipartUpload structure, not supported yet on MinIO type AbortIncompleteMultipartUpload struct { XMLName xml.Name `xml:"AbortIncompleteMultipartUpload,omitempty" json:"-"` @@ -50,13 +53,14 @@ func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.Sta // (or suspended) to request server delete noncurrent object versions at a // specific period in the object's lifetime. type NoncurrentVersionExpiration struct { - XMLName xml.Name `xml:"NoncurrentVersionExpiration" json:"-"` - NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty"` + XMLName xml.Name `xml:"NoncurrentVersionExpiration" json:"-"` + NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty"` + MaxNoncurrentVersions int `xml:"MaxNoncurrentVersions,omitempty"` } // MarshalXML if non-current days not set to non zero value func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if n.IsDaysNull() { + if n.isNull() { return nil } type noncurrentVersionExpirationWrapper NoncurrentVersionExpiration @@ -68,13 +72,17 @@ func (n NoncurrentVersionExpiration) IsDaysNull() bool { return n.NoncurrentDays == ExpirationDays(0) } +func (n NoncurrentVersionExpiration) isNull() bool { + return n.IsDaysNull() && n.MaxNoncurrentVersions == 0 +} + // NoncurrentVersionTransition structure, set this action to request server to // transition noncurrent object versions to different set storage classes // at a specific period in the object's lifetime. 
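The MaxNoncurrentVersions field added to NoncurrentVersionExpiration below can be combined with NoncurrentDays in a lifecycle rule. A minimal sketch, with the bucket name and limits chosen purely for illustration:

package main

import (
	"context"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/lifecycle"
)

// setNoncurrentLimits applies a rule that expires noncurrent versions after
// 30 days and additionally keeps at most 5 noncurrent versions around.
func setNoncurrentLimits(ctx context.Context, client *minio.Client, bucket string) error {
	cfg := &lifecycle.Configuration{
		Rules: []lifecycle.Rule{
			{
				ID:     "limit-noncurrent",
				Status: "Enabled",
				NoncurrentVersionExpiration: lifecycle.NoncurrentVersionExpiration{
					NoncurrentDays:        lifecycle.ExpirationDays(30),
					MaxNoncurrentVersions: 5,
				},
			},
		},
	}
	return client.SetBucketLifecycle(ctx, bucket, cfg)
}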
type NoncurrentVersionTransition struct { XMLName xml.Name `xml:"NoncurrentVersionTransition,omitempty" json:"-"` StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` - NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty" json:"NoncurrentDays,omitempty"` + NoncurrentDays ExpirationDays `xml:"NoncurrentDays" json:"NoncurrentDays"` } // IsDaysNull returns true if days field is null @@ -87,10 +95,30 @@ func (n NoncurrentVersionTransition) IsStorageClassEmpty() bool { return n.StorageClass == "" } +func (n NoncurrentVersionTransition) isNull() bool { + return n.StorageClass == "" +} + +// UnmarshalJSON implements NoncurrentVersionTransition JSONify +func (n *NoncurrentVersionTransition) UnmarshalJSON(b []byte) error { + type noncurrentVersionTransition NoncurrentVersionTransition + var nt noncurrentVersionTransition + err := json.Unmarshal(b, &nt) + if err != nil { + return err + } + + if nt.StorageClass == "" { + return errMissingStorageClass + } + *n = NoncurrentVersionTransition(nt) + return nil +} + // MarshalXML is extended to leave out // <NoncurrentVersionTransition></NoncurrentVersionTransition> tags func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if n.IsDaysNull() || n.IsStorageClassEmpty() { + if n.isNull() { return nil } type noncurrentVersionTransitionWrapper NoncurrentVersionTransition @@ -114,25 +142,44 @@ type Transition struct { XMLName xml.Name `xml:"Transition" json:"-"` Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"` StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` - Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"` + Days ExpirationDays `xml:"Days" json:"Days"` +} + +// UnmarshalJSON returns an error if storage-class is empty. +func (t *Transition) UnmarshalJSON(b []byte) error { + type transition Transition + var tr transition + err := json.Unmarshal(b, &tr) + if err != nil { + return err + } + + if tr.StorageClass == "" { + return errMissingStorageClass + } + *t = Transition(tr) + return nil } // MarshalJSON customizes json encoding by omitting empty values func (t Transition) MarshalJSON() ([]byte, error) { + if t.IsNull() { + return nil, nil + } type transition struct { Date *ExpirationDate `json:"Date,omitempty"` StorageClass string `json:"StorageClass,omitempty"` - Days *ExpirationDays `json:"Days,omitempty"` + Days *ExpirationDays `json:"Days"` } newt := transition{ StorageClass: t.StorageClass, } - if !t.IsDaysNull() { - newt.Days = &t.Days - } + if !t.IsDateNull() { newt.Date = &t.Date + } else { + newt.Days = &t.Days } return json.Marshal(newt) } @@ -147,9 +194,9 @@ func (t Transition) IsDateNull() bool { return t.Date.Time.IsZero() } -// IsNull returns true if both date and days fields are null +// IsNull returns true if no storage-class is set. 
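The stricter JSON decoding introduced here, for both NoncurrentVersionTransition and Transition, can be seen in a small sketch: a document without a storage class is now rejected with the "storage-class cannot be empty" error instead of being silently accepted.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/minio/minio-go/v7/pkg/lifecycle"
)

func main() {
	var tr lifecycle.Transition

	// No StorageClass: the new UnmarshalJSON rejects it.
	err := json.Unmarshal([]byte(`{"Days":30}`), &tr)
	fmt.Println(err) // storage-class cannot be empty

	// With a StorageClass the same document decodes as before.
	err = json.Unmarshal([]byte(`{"Days":30,"StorageClass":"GLACIER"}`), &tr)
	fmt.Println(err, tr.StorageClass)
}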
func (t Transition) IsNull() bool { - return t.IsDaysNull() && t.IsDateNull() + return t.StorageClass == "" } // MarshalXML is transition is non null @@ -364,10 +411,10 @@ func (r Rule) MarshalJSON() ([]byte, error) { if !r.Transition.IsNull() { newr.Transition = &r.Transition } - if !r.NoncurrentVersionExpiration.IsDaysNull() { + if !r.NoncurrentVersionExpiration.isNull() { newr.NoncurrentVersionExpiration = &r.NoncurrentVersionExpiration } - if !r.NoncurrentVersionTransition.IsDaysNull() { + if !r.NoncurrentVersionTransition.isNull() { newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go index 0211f1fb..97c1492b 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go @@ -103,15 +103,21 @@ func (c *Config) AddRule(opts Options) error { if err != nil { return err } + var compatSw bool // true if RoleArn is used with new mc client and older minio version prior to multisite if opts.RoleArn != "" { tokens := strings.Split(opts.RoleArn, ":") if len(tokens) != 6 { return fmt.Errorf("invalid format for replication Role Arn: %v", opts.RoleArn) } - if !strings.HasPrefix(opts.RoleArn, "arn:aws:iam") { + switch { + case strings.HasPrefix(opts.RoleArn, "arn:minio:replication") && len(c.Rules) == 0: + c.Role = opts.RoleArn + compatSw = true + case strings.HasPrefix(opts.RoleArn, "arn:aws:iam"): + c.Role = opts.RoleArn + default: return fmt.Errorf("RoleArn invalid for AWS replication configuration: %v", opts.RoleArn) } - c.Role = opts.RoleArn } var status Status @@ -151,7 +157,11 @@ func (c *Config) AddRule(opts Options) error { destBucket := opts.DestBucket // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html if btokens := strings.Split(destBucket, ":"); len(btokens) != 6 { - return fmt.Errorf("destination bucket needs to be in Arn format") + if len(btokens) == 1 && compatSw { + destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket) + } else { + return fmt.Errorf("destination bucket needs to be in Arn format") + } } dmStatus := Disabled if opts.ReplicateDeleteMarkers != "" { @@ -228,7 +238,7 @@ func (c *Config) AddRule(opts Options) error { return err } // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for MinIO configuration - if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") { + if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && !compatSw { for i := range c.Rules { c.Rules[i].Destination.Bucket = c.Role } @@ -254,7 +264,7 @@ func (c *Config) EditRule(opts Options) error { return fmt.Errorf("rule ID missing") } // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for non AWS. 
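The destination-bucket handling on the new compatibility path reads roughly as in this standalone illustration (a simplified mirror, not the library code itself): a bare bucket name is expanded to ARN form only when the legacy arn:minio:replication role triggered the compat switch, otherwise a full 6-token ARN is still required.

package main

import (
	"fmt"
	"strings"
)

// normalizeDestBucket mirrors the new behaviour for illustration only.
func normalizeDestBucket(destBucket string, compat bool) (string, error) {
	if tokens := strings.Split(destBucket, ":"); len(tokens) != 6 {
		if len(tokens) == 1 && compat {
			return fmt.Sprintf("arn:aws:s3:::%s", destBucket), nil
		}
		return "", fmt.Errorf("destination bucket needs to be in Arn format")
	}
	return destBucket, nil
}

func main() {
	fmt.Println(normalizeDestBucket("target-bucket", true))  // arn:aws:s3:::target-bucket <nil>
	fmt.Println(normalizeDestBucket("target-bucket", false)) // error: needs Arn format
}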
- if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") { + if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && len(c.Rules) > 1 { for i := range c.Rules { c.Rules[i].Destination.Bucket = c.Role } @@ -484,10 +494,7 @@ func (r Rule) validateStatus() error { } func (r Rule) validateFilter() error { - if err := r.Filter.Validate(); err != nil { - return err - } - return nil + return r.Filter.Validate() } // Prefix - a rule can either have prefix under <filter></filter> or under @@ -712,9 +719,12 @@ type Metrics struct { FailedCount uint64 `json:"failedReplicationCount"` } +// ResyncTargetsInfo provides replication target information to resync replicated data. type ResyncTargetsInfo struct { Targets []ResyncTarget `json:"target,omitempty"` } + +// ResyncTarget provides the replica resources and resetID to initiate resync replication. type ResyncTarget struct { Arn string `json:"arn"` ResetID string `json:"resetid"` diff --git a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go index fea25d6e..44945464 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go @@ -171,6 +171,7 @@ func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool { return false } return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" || + endpointURL.Host == "s3-fips.us-gov-west-1.amazonaws.com" || endpointURL.Host == "s3-fips.dualstack.us-gov-west-1.amazonaws.com" } @@ -211,7 +212,7 @@ func IsGoogleEndpoint(endpointURL url.URL) bool { // Expects ascii encoded strings - from output of urlEncodePath func percentEncodeSlash(s string) string { - return strings.Replace(s, "/", "%2F", -1) + return strings.ReplaceAll(s, "/", "%2F") } // QueryEncode - encodes query values in their URL encoded form. In diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go index 71821a26..b6ea78f7 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go @@ -233,16 +233,7 @@ func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) { if idx > 0 { buf.WriteByte(',') } - if strings.Contains(v, "\n") { - // TODO: "Unfold" long headers that - // span multiple lines (as allowed by - // RFC 2616, section 4.2) by replacing - // the folding white-space (including - // new-line) by a single space. - buf.WriteString(v) - } else { - buf.WriteString(v) - } + buf.WriteString(v) } buf.WriteByte('\n') } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go index 67572b20..ce64c37d 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go @@ -42,22 +42,22 @@ const ( ServiceTypeSTS = "sts" ) -/// -/// Excerpts from @lsegal - -/// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. -/// -/// User-Agent: -/// -/// This is ignored from signing because signing this causes -/// problems with generating pre-signed URLs (that are executed -/// by other agents) or when customers pass requests through -/// proxies, which may modify the user-agent. 
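The extra FIPS GovCloud host recognised in s3utils can be checked directly through the exported helper; a quick sketch:

package main

import (
	"fmt"
	"net/url"

	"github.com/minio/minio-go/v7/pkg/s3utils"
)

func main() {
	u, err := url.Parse("https://s3-fips.us-gov-west-1.amazonaws.com")
	if err != nil {
		panic(err)
	}
	// Now reports true; previously only the "s3-fips-us-gov-west-1" and
	// dualstack spellings of the endpoint were recognised.
	fmt.Println(s3utils.IsAmazonFIPSGovCloudEndpoint(*u))
}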
-/// -/// -/// Authorization: -/// -/// Is skipped for obvious reasons -/// +// +// Excerpts from @lsegal - +// https:/github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. +// +// User-Agent: +// +// This is ignored from signing because signing this causes +// problems with generating pre-signed URLs (that are executed +// by other agents) or when customers pass requests through +// proxies, which may modify the user-agent. +// +// +// Authorization: +// +// Is skipped for obvious reasons +// var v4IgnoredHeaders = map[string]bool{ "Authorization": true, "User-Agent": true, @@ -118,7 +118,9 @@ func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) strin headers = append(headers, strings.ToLower(k)) vals[strings.ToLower(k)] = vv } - headers = append(headers, "host") + if !headerExists("host", headers) { + headers = append(headers, "host") + } sort.Strings(headers) var buf bytes.Buffer @@ -130,7 +132,7 @@ func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) strin switch { case k == "host": buf.WriteString(getHostAddr(&req)) - fallthrough + buf.WriteByte('\n') default: for idx, v := range vals[k] { if idx > 0 { @@ -144,6 +146,15 @@ func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) strin return buf.String() } +func headerExists(key string, headers []string) bool { + for _, k := range headers { + if k == key { + return true + } + } + return false +} + // getSignedHeaders generate all signed request headers. // i.e lexically sorted, semicolon-separated list of lowercase // request header names. @@ -155,7 +166,9 @@ func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string { } headers = append(headers, strings.ToLower(k)) } - headers = append(headers, "host") + if !headerExists("host", headers) { + headers = append(headers, "host") + } sort.Strings(headers) return strings.Join(headers, ";") } @@ -170,7 +183,7 @@ func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string { // <SignedHeaders>\n // <HashedPayload> func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string { - req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) + req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20") canonicalRequest := strings.Join([]string{ req.Method, s3utils.EncodePath(req.URL.Path), @@ -186,7 +199,7 @@ func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashe func getStringToSignV4(t time.Time, location, canonicalRequest, serviceType string) string { stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n" stringToSign = stringToSign + getScope(location, t, serviceType) + "\n" - stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest))) + stringToSign += hex.EncodeToString(sum256([]byte(canonicalRequest))) return stringToSign } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go index 2192a369..b54fa4c7 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go @@ -44,6 +44,10 @@ func sumHMAC(key []byte, data []byte) []byte { // getHostAddr returns host header if available, otherwise returns host from URL func getHostAddr(req *http.Request) string { + host := req.Header.Get("host") + if host != "" && req.Host != host { + return host + } if req.Host != "" { return req.Host } diff --git 
a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go index 31a7308c..7aa96e0d 100644 --- a/vendor/github.com/minio/minio-go/v7/post-policy.go +++ b/vendor/github.com/minio/minio-go/v7/post-policy.go @@ -316,8 +316,8 @@ func (p PostPolicy) marshalJSON() []byte { } retStr := "{" retStr = retStr + expirationStr + "," - retStr = retStr + conditionsStr - retStr = retStr + "}" + retStr += conditionsStr + retStr += "}" return []byte(retStr) } diff --git a/vendor/github.com/minio/minio-go/v7/retry-continous.go b/vendor/github.com/minio/minio-go/v7/retry-continous.go index 3d25883b..b54081d0 100644 --- a/vendor/github.com/minio/minio-go/v7/retry-continous.go +++ b/vendor/github.com/minio/minio-go/v7/retry-continous.go @@ -20,7 +20,7 @@ package minio import "time" // newRetryTimerContinous creates a timer with exponentially increasing delays forever. -func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int { +func (c *Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int { attemptCh := make(chan int) // normalize jitter to the range [0, 1.0] @@ -39,7 +39,7 @@ func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, ji if attempt > maxAttempt { attempt = maxAttempt } - //sleep = random_between(0, min(cap, base * 2 ** attempt)) + // sleep = random_between(0, min(cap, base * 2 ** attempt)) sleep := unit * time.Duration(1<<uint(attempt)) if sleep > cap { sleep = cap diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go index 598af297..5611770d 100644 --- a/vendor/github.com/minio/minio-go/v7/retry.go +++ b/vendor/github.com/minio/minio-go/v7/retry.go @@ -42,7 +42,7 @@ var DefaultRetryCap = time.Second // newRetryTimer creates a timer with exponentially increasing // delays until the maximum retry attempts are reached. 
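Both retry timers implement the commented formula sleep = random_between(0, min(cap, base * 2 ** attempt)). A standalone sketch of that backoff calculation, mirroring the jitter handling used in these files:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff returns the capped exponential delay for a given attempt,
// reduced by a random fraction controlled by jitter in [0, 1].
func backoff(attempt int, unit, cap time.Duration, jitter float64) time.Duration {
	if jitter < 0 {
		jitter = 0
	}
	if jitter > 1 {
		jitter = 1
	}
	sleep := unit * time.Duration(1<<uint(attempt))
	if sleep > cap {
		sleep = cap
	}
	if jitter != 0 {
		sleep -= time.Duration(rand.Float64() * float64(sleep) * jitter)
	}
	return sleep
}

func main() {
	for attempt := 0; attempt < 5; attempt++ {
		fmt.Println(attempt, backoff(attempt, 200*time.Millisecond, time.Second, 0.5))
	}
}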
-func (c Client) newRetryTimer(ctx context.Context, maxRetry int, unit time.Duration, cap time.Duration, jitter float64) <-chan int { +func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit time.Duration, cap time.Duration, jitter float64) <-chan int { attemptCh := make(chan int) // computes the exponential backoff duration according to @@ -56,7 +56,7 @@ func (c Client) newRetryTimer(ctx context.Context, maxRetry int, unit time.Durat jitter = MaxJitter } - //sleep = random_between(0, min(cap, base * 2 ** attempt)) + // sleep = random_between(0, min(cap, base * 2 ** attempt)) sleep := unit * time.Duration(1<<uint(attempt)) if sleep > cap { sleep = cap diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go index e7f90a3b..297df7bf 100644 --- a/vendor/github.com/minio/minio-go/v7/utils.go +++ b/vendor/github.com/minio/minio-go/v7/utils.go @@ -52,7 +52,7 @@ var expirationRegex = regexp.MustCompile(`expiry-date="(.*?)", rule-id="(.*?)"`) func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) { if matches := expirationRegex.FindStringSubmatch(expiration); len(matches) == 3 { - expTime, err := time.Parse(http.TimeFormat, matches[1]) + expTime, err := parseRFC7231Time(matches[1]) if err != nil { return time.Time{}, "" } @@ -73,7 +73,7 @@ func amzRestoreToStruct(restore string) (ongoing bool, expTime time.Time, err er return false, time.Time{}, err } if matches[3] != "" { - expTime, err = time.Parse(http.TimeFormat, matches[3]) + expTime, err = parseRFC7231Time(matches[3]) if err != nil { return false, time.Time{}, err } @@ -240,6 +240,27 @@ func extractObjMetadata(header http.Header) http.Header { return filteredHeader } +const ( + // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT + rfc822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + rfc822TimeFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT" + rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT" +) + +func parseTime(t string, formats ...string) (time.Time, error) { + for _, format := range formats { + tt, err := time.Parse(format, t) + if err == nil { + return tt, nil + } + } + return time.Time{}, fmt.Errorf("unable to parse %s in any of the input formats: %s", t, formats) +} + +func parseRFC7231Time(lastModified string) (time.Time, error) { + return parseTime(lastModified, rfc822TimeFormat, rfc822TimeFormatSingleDigitDay, rfc822TimeFormatSingleDigitDayTwoDigitYear) +} + // ToObjectInfo converts http header values into ObjectInfo type, // extracts metadata and fills in all the necessary fields in ObjectInfo. func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectInfo, error) { @@ -267,7 +288,7 @@ func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectIn } // Parse Last-Modified has http time format. 
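The relaxed Last-Modified handling added above falls back through several RFC 7231 layouts. Since parseRFC7231Time is unexported, here is a standalone sketch of the same idea, using the layouts from the new constants plus the strict http.TimeFormat:

package main

import (
	"fmt"
	"net/http"
	"time"
)

var lastModifiedLayouts = []string{
	http.TimeFormat,                 // "Mon, 02 Jan 2006 15:04:05 GMT"
	"Mon, 2 Jan 2006 15:04:05 GMT",  // single-digit day
	"Mon, _2 Jan 2006 15:04:05 GMT", // space-padded day
	"Mon, _2 Jan 06 15:04:05 GMT",   // two-digit year
}

func parseHTTPTime(v string) (time.Time, error) {
	for _, layout := range lastModifiedLayouts {
		if t, err := time.Parse(layout, v); err == nil {
			return t, nil
		}
	}
	return time.Time{}, fmt.Errorf("unable to parse %q as an RFC 7231 date", v)
}

func main() {
	// Accepted by the strict layout and the relaxed ones alike.
	fmt.Println(parseHTTPTime("Tue, 29 Apr 2014 18:30:38 GMT"))
	// Single-digit day: rejected by http.TimeFormat, accepted by the fallbacks.
	fmt.Println(parseHTTPTime("Tue, 1 Apr 2014 18:30:38 GMT"))
}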
- date, err := time.Parse(http.TimeFormat, h.Get("Last-Modified")) + mtime, err := parseRFC7231Time(h.Get("Last-Modified")) if err != nil { return ObjectInfo{}, ErrorResponse{ Code: "InternalError", @@ -289,7 +310,18 @@ func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectIn expiryStr := h.Get("Expires") var expiry time.Time if expiryStr != "" { - expiry, _ = time.Parse(http.TimeFormat, expiryStr) + expiry, err = parseRFC7231Time(expiryStr) + if err != nil { + return ObjectInfo{}, ErrorResponse{ + Code: "InternalError", + Message: fmt.Sprintf("'Expiry' is not in supported format: %v", err), + BucketName: bucketName, + Key: objectName, + RequestID: h.Get("x-amz-request-id"), + HostID: h.Get("x-amz-id-2"), + Region: h.Get("x-amz-bucket-region"), + } + } } metadata := extractObjMetadata(h) @@ -337,7 +369,7 @@ func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectIn ETag: etag, Key: objectName, Size: size, - LastModified: date, + LastModified: mtime, ContentType: contentType, Expires: expiry, VersionID: h.Get(amzVersionID), @@ -404,7 +436,7 @@ func redactSignature(origAuth string) string { return "AWS **REDACTED**:**REDACTED**" } - /// Signature V4 authorization header. + // Signature V4 authorization header. // Strip out accessKeyID from: // Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request @@ -552,6 +584,11 @@ func IsNetworkOrHostDown(err error, expectTimeouts bool) bool { if expectTimeouts && errors.Is(err, context.DeadlineExceeded) { return false } + + if errors.Is(err, context.DeadlineExceeded) { + return true + } + // We need to figure if the error either a timeout // or a non-temporary error. urlErr := &url.Error{} @@ -581,6 +618,10 @@ func IsNetworkOrHostDown(err error, expectTimeouts bool) bool { case strings.Contains(err.Error(), "connection timed out"): // If err is a net.Dial timeout. return true + case strings.Contains(err.Error(), "connection refused"): + // If err is connection refused + return true + case strings.Contains(strings.ToLower(err.Error()), "503 service unavailable"): // Denial errors return true |
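Finally, the broadened IsNetworkOrHostDown classification at the end of utils.go can be exercised directly; the errors below are constructed locally purely for illustration:

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/minio/minio-go/v7"
)

func main() {
	// A deadline-exceeded error now counts as the host being down,
	// unless the caller explicitly expects timeouts.
	fmt.Println(minio.IsNetworkOrHostDown(context.DeadlineExceeded, false)) // true
	fmt.Println(minio.IsNetworkOrHostDown(context.DeadlineExceeded, true))  // false

	// The string matching also covers refused connections.
	refused := errors.New("dial tcp 127.0.0.1:9000: connection refused")
	fmt.Println(minio.IsNetworkOrHostDown(refused, false)) // true
}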