| author | Wim <wim@42.be> | 2022-06-11 23:07:42 +0200 |
| --- | --- | --- |
| committer | GitHub <noreply@github.com> | 2022-06-11 23:07:42 +0200 |
| commit | 8751fb4bb1eb7cd34ed63be9b3801b8aeac71a1d (patch) | |
| tree | 601d2616b05b5b197bd2a3ae7cb245b1a0ea17e7 /vendor/github.com/minio/minio-go | |
| parent | 3819062574ac7e4af6a562bf40a425469a7752fb (diff) | |
| download | matterbridge-msglm-8751fb4bb1eb7cd34ed63be9b3801b8aeac71a1d.tar.gz, matterbridge-msglm-8751fb4bb1eb7cd34ed63be9b3801b8aeac71a1d.tar.bz2, matterbridge-msglm-8751fb4bb1eb7cd34ed63be9b3801b8aeac71a1d.zip | |
Update dependencies (#1841)
Diffstat (limited to 'vendor/github.com/minio/minio-go')
26 files changed, 132 insertions, 136 deletions
diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile
index e7a3d758..ac4a328f 100644
--- a/vendor/github.com/minio/minio-go/v7/Makefile
+++ b/vendor/github.com/minio/minio-go/v7/Makefile
@@ -9,7 +9,7 @@ checks: lint vet test examples functional-test
 
 lint:
 	@mkdir -p ${GOPATH}/bin
-	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.40.1
+	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.45.2
 	@echo "Running $@ check"
 	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
 	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go
index 1e6f3da1..dc37b0c0 100644
--- a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go
@@ -103,7 +103,6 @@ func (c *Client) getBucketNotification(ctx context.Context, bucketName string) (
 		return notification.Configuration{}, err
 	}
 	return processBucketNotificationResponse(bucketName, resp)
-
 }
 
 // processes the GetNotification http response from the server.
@@ -207,7 +206,7 @@ func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefi
 			// Use a higher buffer to support unexpected
 			// caching done by proxies
 			bio.Buffer(notificationEventBuffer, notificationCapacity)
-			var json = jsoniter.ConfigCompatibleWithStandardLibrary
+			json := jsoniter.ConfigCompatibleWithStandardLibrary
 
 			// Unmarshal each line, returns marshaled values.
 			for bio.Scan() {
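The bucket-notification hunk above only tightens the declaration of jsoniter's standard-library-compatible config (`var json = ...` becomes `json := ...`). As a rough sketch of why that config object works as a drop-in for `encoding/json` — the `Event` type here is a made-up placeholder, not the minio-go notification type:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// Event is a placeholder type for this sketch only.
type Event struct {
	Name string `json:"name"`
	Size int64  `json:"size"`
}

func main() {
	// ConfigCompatibleWithStandardLibrary behaves like encoding/json,
	// so downstream code can keep calling json.Marshal / json.Unmarshal.
	json := jsoniter.ConfigCompatibleWithStandardLibrary

	data, err := json.Marshal(Event{Name: "object.jpg", Size: 1024})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))

	var e Event
	if err := json.Unmarshal(data, &e); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", e)
}
```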
diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go
index f349f99e..b59924a3 100644
--- a/vendor/github.com/minio/minio-go/v7/api-compose-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go
@@ -202,8 +202,8 @@ func (opts CopySrcOptions) validate() (err error) {
 
 // Low level implementation of CopyObject API, supports only upto 5GiB worth of copy.
 func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
-	metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions) (ObjectInfo, error) {
-
+	metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions,
+) (ObjectInfo, error) {
 	// Build headers.
 	headers := make(http.Header)
@@ -285,8 +285,8 @@ func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuc
 }
 
 func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
-	partID int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) {
-
+	partID int, startOffset int64, length int64, metadata map[string]string,
+) (p CompletePart, err error) {
 	headers := make(http.Header)
 
 	// Set source
@@ -338,8 +338,8 @@ func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, des
 // upload via an upload-part-copy request
 // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
 func (c *Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
-	headers http.Header) (p CompletePart, err error) {
-
+	headers http.Header,
+) (p CompletePart, err error) {
 	// Build query parameters
 	urlValues := make(url.Values)
 	urlValues.Set("partNumber", strconv.Itoa(partNumber))
@@ -492,7 +492,7 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..
 	objParts := []CompletePart{}
 	partIndex := 1
 	for i, src := range srcs {
-		var h = make(http.Header)
+		h := make(http.Header)
 		src.Marshal(h)
 		if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC {
 			dst.Encryption.Marshal(h)
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-file.go b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go
index 98f5acf6..2332dbf1 100644
--- a/vendor/github.com/minio/minio-go/v7/api-get-object-file.go
+++ b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go
@@ -57,7 +57,7 @@ func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePat
 	objectDir, _ := filepath.Split(filePath)
 	if objectDir != "" {
 		// Create any missing top level directories.
-		if err := os.MkdirAll(objectDir, 0700); err != nil {
+		if err := os.MkdirAll(objectDir, 0o700); err != nil {
 			return err
 		}
 	}
@@ -72,7 +72,7 @@ func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePat
 	filePartPath := filePath + objectStat.ETag + ".part.minio"
 
 	// If exists, open in append mode. If not create it as a part file.
-	filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
+	filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o600)
 	if err != nil {
 		return err
 	}
diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go
index 85209b5d..9b2b00ae 100644
--- a/vendor/github.com/minio/minio-go/v7/api-list.go
+++ b/vendor/github.com/minio/minio-go/v7/api-list.go
@@ -774,7 +774,6 @@ func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPr
 	}(objectMultipartStatCh)
 	// return.
 	return objectMultipartStatCh
-
 }
 
 // listMultipartUploadsQuery - (List Multipart Uploads).
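The `0700`/`0600` edits above change only notation: Go 1.13 introduced the explicit `0o` prefix for octal literals, and both spellings denote the same file mode. A small standalone sketch with hypothetical paths (not taken from the library):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// 0o700 and 0700 are the same value; the 0o prefix just makes the octal base explicit.
	fmt.Println(0o700 == 0700) // true

	// Hypothetical paths, used the same way FGetObject prepares its ".part.minio" file.
	if err := os.MkdirAll("tmp/objects", 0o700); err != nil {
		panic(err)
	}
	f, err := os.OpenFile("tmp/objects/download.part", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o600)
	if err != nil {
		panic(err)
	}
	defer f.Close()
}
```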
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
index 333321aa..342a8dc2 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
@@ -38,7 +38,8 @@ import (
 )
 
 func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64,
-	opts PutObjectOptions) (info UploadInfo, err error) {
+	opts PutObjectOptions,
+) (info UploadInfo, err error) {
 	info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts)
 	if err != nil {
 		errResp := ToErrorResponse(err)
@@ -240,7 +241,8 @@ func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, object
 
 // uploadPart - Uploads a part in a multipart upload.
 func (c *Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader,
-	partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide) (ObjectPart, error) {
+	partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide,
+) (ObjectPart, error) {
 	// Input validation.
 	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		return ObjectPart{}, err
@@ -311,7 +313,8 @@ func (c *Client) uploadPart(ctx context.Context, bucketName, objectName, uploadI
 
 // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
 func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
-	complete completeMultipartUpload, opts PutObjectOptions) (UploadInfo, error) {
+	complete completeMultipartUpload, opts PutObjectOptions,
+) (UploadInfo, error) {
 	// Input validation.
 	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		return UploadInfo{}, err
@@ -392,5 +395,4 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
 		Expiration:       expTime,
 		ExpirationRuleID: ruleID,
 	}, nil
-
 }
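The signature rewrites in this file (and in most files below) look like the stricter layout that `gofumpt` enforces for wrapped parameter lists: the last parameter gets a trailing comma, the closing parenthesis moves to its own line, and the blank line that used to separate the signature from the body goes away. A neutral before/after sketch with made-up names, offered only to illustrate the formatting:

```go
package main

import (
	"context"
	"io"
)

// UploadInfo is a stand-in type for this formatting sketch.
type UploadInfo struct{}

// Before: the closing parenthesis shares a line with the last parameter,
// and a blank line separates the signature from the body.
func uploadOld(ctx context.Context, bucket, object string,
	reader io.Reader, size int64) (UploadInfo, error) {

	return UploadInfo{}, nil
}

// After (gofumpt-style): trailing comma, ")" on its own line, no leading blank line.
func uploadNew(ctx context.Context, bucket, object string,
	reader io.Reader, size int64,
) (UploadInfo, error) {
	return UploadInfo{}, nil
}

func main() {
	_, _ = uploadOld(context.Background(), "bucket", "object", nil, 0)
	_, _ = uploadNew(context.Background(), "bucket", "object", nil, 0)
}
```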
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
index 55cc4f14..2497aecf 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
@@ -42,8 +42,8 @@ import (
 //  - Any reader which has a method 'ReadAt()'
 //
 func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
-	reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
-
+	reader io.Reader, size int64, opts PutObjectOptions,
+) (info UploadInfo, err error) {
 	if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
 		// Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader.
 		info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
@@ -91,7 +91,8 @@ type uploadPartReq struct {
 // cleaned automatically when the caller i.e http client closes the
 // stream after uploading all the contents successfully.
 func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
-	reader io.ReaderAt, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
+	reader io.ReaderAt, size int64, opts PutObjectOptions,
+) (info UploadInfo, err error) {
 	// Input validation.
 	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
 		return UploadInfo{}, err
@@ -147,7 +148,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
 	}
 	close(uploadPartsCh)
 
-	var partsBuf = make([][]byte, opts.getNumThreads())
+	partsBuf := make([][]byte, opts.getNumThreads())
 	for i := range partsBuf {
 		partsBuf[i] = make([]byte, 0, partSize)
 	}
@@ -171,7 +172,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
 				}
 
 				n, rerr := readFull(io.NewSectionReader(reader, readOffset, partSize), partsBuf[w-1][:partSize])
-				if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF {
+				if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
 					uploadedPartsCh <- uploadedPartRes{
 						Error: rerr,
 					}
@@ -241,7 +242,8 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
 }
 
 func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string,
-	reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
+	reader io.Reader, size int64, opts PutObjectOptions,
+) (info UploadInfo, err error) {
 	// Input validation.
 	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
 		return UploadInfo{}, err
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go
index e8a964e2..0dc77e6c 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go
@@ -229,7 +229,8 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
 //
 // NOTE: Upon errors during upload multipart operation is entirely aborted.
 func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
-	opts PutObjectOptions) (info UploadInfo, err error) {
+	opts PutObjectOptions,
+) (info UploadInfo, err error) {
 	if objectSize < 0 && opts.DisableMultipart {
 		return UploadInfo{}, errors.New("object size must be provided with disable multipart upload")
 	}
diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
index f9feda30..b7502e2d 100644
--- a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
+++ b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
@@ -133,7 +133,7 @@ func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts
 			return f, st.Size(), nil
 		}
 	}
-	var flush = func() error { return nil }
+	flush := func() error { return nil }
 	if !opts.Compress {
 		if !opts.InMemory {
 			// Insert buffer for writes.
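The one behavioral fix in the streaming hunks above is in `putObjectMultipartStreamFromReadAt`: the result of `readFull` was compared against the outer `err` variable instead of its own `rerr`, so an end-of-stream condition was not treated as an acceptable short read. A minimal standalone sketch of the intended check, using `io.ReadFull` in place of the library's internal `readFull` helper (an assumption for illustration):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	reader := strings.NewReader("0123456789")
	buf := make([]byte, 16) // deliberately larger than the remaining data

	// Read a section that extends past the end of the underlying data.
	n, rerr := io.ReadFull(io.NewSectionReader(reader, 4, 16), buf)

	// A short read surfaces as io.ErrUnexpectedEOF (or io.EOF when nothing is left);
	// both simply mean "this was the last part", so only other errors are fatal.
	if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
		panic(rerr)
	}
	fmt.Printf("read %d bytes: %q\n", n, buf[:n])
}
```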
diff --git a/vendor/github.com/minio/minio-go/v7/api-select.go b/vendor/github.com/minio/minio-go/v7/api-select.go
index 74c1df5f..5d47d7ec 100644
--- a/vendor/github.com/minio/minio-go/v7/api-select.go
+++ b/vendor/github.com/minio/minio-go/v7/api-select.go
@@ -519,7 +519,7 @@ func (s *SelectResults) start(pipeWriter *io.PipeWriter) {
 	go func() {
 		for {
 			var prelude preludeInfo
-			var headers = make(http.Header)
+			headers := make(http.Header)
 			var err error
 
 			// Create CRC code
@@ -624,7 +624,7 @@ func (p preludeInfo) PayloadLen() int64 {
 // the struct,
 func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) {
 	var err error
-	var pInfo = preludeInfo{}
+	pInfo := preludeInfo{}
 
 	// reads total length of the message (first 4 bytes)
 	pInfo.totalLen, err = extractUint32(prelude)
@@ -752,7 +752,6 @@ func checkCRC(r io.Reader, expect uint32) error {
 	if msgCRC != expect {
 		return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X", msgCRC, expect)
-
 	}
 	return nil
 }
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
index 6b874e5c..ee637bd0 100644
--- a/vendor/github.com/minio/minio-go/v7/api.go
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -111,7 +111,7 @@ type Options struct {
 // Global constants.
 const (
 	libraryName    = "minio-go"
-	libraryVersion = "v7.0.23"
+	libraryVersion = "v7.0.24"
 )
 
 // User Agent should always following the below style.
@@ -537,7 +537,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
 	var retryable bool       // Indicates if request can be retried.
 	var bodySeeker io.Seeker // Extracted seeker from io.Reader.
-	var reqRetry = MaxRetry  // Indicates how many times we can retry the request
+	reqRetry := MaxRetry     // Indicates how many times we can retry the request
 
 	if metadata.contentBody != nil {
 		// Check if body is seekable then it is retryable.
diff --git a/vendor/github.com/minio/minio-go/v7/bucket-cache.go b/vendor/github.com/minio/minio-go/v7/bucket-cache.go
index 19dabd25..b7d99c69 100644
--- a/vendor/github.com/minio/minio-go/v7/bucket-cache.go
+++ b/vendor/github.com/minio/minio-go/v7/bucket-cache.go
@@ -181,6 +181,9 @@ func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string
 	if h, p, err := net.SplitHostPort(targetURL.Host); err == nil {
 		if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" {
 			targetURL.Host = h
+			if ip := net.ParseIP(h); ip != nil && ip.To16() != nil {
+				targetURL.Host = "[" + h + "]"
+			}
 		}
 	}
diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go
index fdba5274..c2a90239 100644
--- a/vendor/github.com/minio/minio-go/v7/core.go
+++ b/vendor/github.com/minio/minio-go/v7/core.go
@@ -63,8 +63,8 @@ func (c Core) CopyObject(ctx context.Context, sourceBucket, sourceObject, destBu
 
 // CopyObjectPart - creates a part in a multipart upload by copying (a
 // part of) an existing object.
func (c Core) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, - partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) { - + partID int, startOffset, length int64, metadata map[string]string, +) (p CompletePart, err error) { return c.copyObjectPartDo(ctx, srcBucket, srcObject, destBucket, destObject, uploadID, partID, startOffset, length, metadata) } diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go index e43e612b..59f347ef 100644 --- a/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -61,6 +61,7 @@ const ( letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits ) + const ( serverEndpoint = "SERVER_ENDPOINT" accessKey = "ACCESS_KEY" @@ -69,8 +70,7 @@ const ( enableKMS = "ENABLE_KMS" ) -type mintJSONFormatter struct { -} +type mintJSONFormatter struct{} func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) { data := make(log.Fields, len(entry.Data)) @@ -84,7 +84,7 @@ func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) { data[k] = v } } - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary serialized, err := json.Marshal(data) if err != nil { return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) @@ -168,11 +168,15 @@ func failureLog(testName string, function string, args map[string]interface{}, s var fields log.Fields // log with the fields as per mint if err != nil { - fields = log.Fields{"name": "minio-go: " + testName, "function": function, "args": args, - "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, "error": err} + fields = log.Fields{ + "name": "minio-go: " + testName, "function": function, "args": args, + "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, "error": err, + } } else { - fields = log.Fields{"name": "minio-go: " + testName, "function": function, "args": args, - "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message} + fields = log.Fields{ + "name": "minio-go: " + testName, "function": function, "args": args, + "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, + } } return log.WithFields(cleanEmptyEntries(fields)) } @@ -182,8 +186,10 @@ func ignoredLog(testName string, function string, args map[string]interface{}, s // calculate the test case duration duration := time.Since(startTime) // log with the fields as per mint - fields := log.Fields{"name": "minio-go: " + testName, "function": function, "args": args, - "duration": duration.Nanoseconds() / 1000000, "status": "NA", "alert": strings.Split(alert, " ")[0] + " is NotImplemented"} + fields := log.Fields{ + "name": "minio-go: " + testName, "function": function, "args": args, + "duration": duration.Nanoseconds() / 1000000, "status": "NA", "alert": strings.Split(alert, " ")[0] + " is NotImplemented", + } return log.WithFields(cleanEmptyEntries(fields)) } @@ -632,7 +638,7 @@ func testPutObjectReadAt() { defer cleanupBucket(bucketName, c) bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") + reader := getDataReader("datafile-129-MB") defer reader.Close() 
// Save the data @@ -738,7 +744,7 @@ func testListObjectVersions() { args["objectName"] = objectName bufSize := dataFileMap["datafile-10-kB"] - var reader = getDataReader("datafile-10-kB") + reader := getDataReader("datafile-10-kB") _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) if err != nil { @@ -857,7 +863,7 @@ func testStatObjectWithVersioning() { args["objectName"] = objectName bufSize := dataFileMap["datafile-10-kB"] - var reader = getDataReader("datafile-10-kB") + reader := getDataReader("datafile-10-kB") _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) if err != nil { @@ -975,7 +981,7 @@ func testGetObjectWithVersioning() { // Save the contents of datafiles to check with GetObject() reader output later var buffers [][]byte - var testFiles = []string{"datafile-1-b", "datafile-10-kB"} + testFiles := []string{"datafile-1-b", "datafile-10-kB"} for _, testFile := range testFiles { r := getDataReader(testFile) @@ -1117,7 +1123,7 @@ func testPutObjectWithVersioning() { // Save the data concurrently. var wg sync.WaitGroup wg.Add(n) - var buffers = make([][]byte, n) + buffers := make([][]byte, n) var errs [n]error for i := 0; i < n; i++ { r := newRandomReader(int64((1<<20)*i+i), int64(i)) @@ -1258,7 +1264,7 @@ func testCopyObjectWithVersioning() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - var testFiles = []string{"datafile-1-b", "datafile-10-kB"} + testFiles := []string{"datafile-1-b", "datafile-10-kB"} for _, testFile := range testFiles { r := getDataReader(testFile) buf, err := ioutil.ReadAll(r) @@ -1395,7 +1401,7 @@ func testConcurrentCopyObjectWithVersioning() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - var testFiles = []string{"datafile-10-kB"} + testFiles := []string{"datafile-10-kB"} for _, testFile := range testFiles { r := getDataReader(testFile) buf, err := ioutil.ReadAll(r) @@ -1556,7 +1562,7 @@ func testComposeObjectWithVersioning() { args["objectName"] = objectName // var testFiles = []string{"datafile-5-MB", "datafile-10-kB"} - var testFiles = []string{"datafile-5-MB", "datafile-10-kB"} + testFiles := []string{"datafile-5-MB", "datafile-10-kB"} var testFilesBytes [][]byte for _, testFile := range testFiles { @@ -2036,7 +2042,7 @@ func testPutObjectWithMetadata() { defer cleanupBucket(bucketName, c) bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") + reader := getDataReader("datafile-129-MB") defer reader.Close() // Save the data @@ -2052,7 +2058,8 @@ func testPutObjectWithMetadata() { } _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ - ContentType: customContentType}) + ContentType: customContentType, + }) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return @@ -2282,7 +2289,7 @@ func testGetObjectSeekEnd() { // Generate 33K of data. bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") + reader := getDataReader("datafile-33-kB") defer reader.Close() // Save the data @@ -2404,7 +2411,7 @@ func testGetObjectClosedTwice() { // Generate 33K of data. 
bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") + reader := getDataReader("datafile-33-kB") defer reader.Close() // Save the data @@ -2807,7 +2814,7 @@ func testFPutObjectMultipart() { defer cleanupBucket(bucketName, c) // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload. - var fileName = getMintDataDirFilePath("datafile-129-MB") + fileName := getMintDataDirFilePath("datafile-129-MB") if fileName == "" { // Make a temp file with minPartSize bytes of data. file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") @@ -2916,7 +2923,7 @@ func testFPutObject() { // Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part. // Use different data in part for multipart tests to check parts are uploaded in correct order. - var fName = getMintDataDirFilePath("datafile-129-MB") + fName := getMintDataDirFilePath("datafile-129-MB") if fName == "" { // Make a temp file with minPartSize bytes of data. file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") @@ -3082,7 +3089,7 @@ func testFPutObjectContext() { // Upload 1 parts worth of data to use multipart upload. // Use different data in part for multipart tests to check parts are uploaded in correct order. - var fName = getMintDataDirFilePath("datafile-1-MB") + fName := getMintDataDirFilePath("datafile-1-MB") if fName == "" { // Make a temp file with 1 MiB bytes of data. file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest") @@ -3134,7 +3141,6 @@ func testFPutObjectContext() { } successLogger(testName, function, args, startTime).Info() - } // Tests FPutObject request when context cancels after timeout @@ -3183,7 +3189,7 @@ func testFPutObjectContextV2() { // Upload 1 parts worth of data to use multipart upload. // Use different data in part for multipart tests to check parts are uploaded in correct order. - var fName = getMintDataDirFilePath("datafile-1-MB") + fName := getMintDataDirFilePath("datafile-1-MB") if fName == "" { // Make a temp file with 1 MiB bytes of data. file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest") @@ -3237,7 +3243,6 @@ func testFPutObjectContextV2() { } successLogger(testName, function, args, startTime).Info() - } // Test validates putObject with context to see if request cancellation is honored. @@ -3283,7 +3288,7 @@ func testPutObjectContext() { defer cleanupBucket(bucketName, c) bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") + reader := getDataReader("datafile-33-kB") defer reader.Close() objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) args["objectName"] = objectName @@ -3312,7 +3317,6 @@ func testPutObjectContext() { } successLogger(testName, function, args, startTime).Info() - } // Tests get object with s3zip extensions. @@ -3428,7 +3432,7 @@ func testGetObjectS3Zip() { lOpts.Prefix = objectName + "/" lOpts.Recursive = true list := c.ListObjects(context.Background(), bucketName, lOpts) - var listed = map[string]minio.ObjectInfo{} + listed := map[string]minio.ObjectInfo{} for item := range list { if item.Err != nil { break @@ -3547,7 +3551,7 @@ func testGetObjectReadSeekFunctional() { // Generate 33K of data. bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") + reader := getDataReader("datafile-33-kB") defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -3710,7 +3714,7 @@ func testGetObjectReadAtFunctional() { // Generate 33K of data. 
bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") + reader := getDataReader("datafile-33-kB") defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -3887,7 +3891,7 @@ func testGetObjectReadAtWhenEOFWasReached() { // Generate 33K of data. bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") + reader := getDataReader("datafile-33-kB") defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -4004,7 +4008,7 @@ func testPresignedPostPolicy() { defer cleanupBucket(bucketName, c) // Generate 33K of data. - var reader = getDataReader("datafile-33-kB") + reader := getDataReader("datafile-33-kB") defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -4081,7 +4085,7 @@ func testPresignedPostPolicy() { } // Get a 33KB file to upload and test if set post policy works - var filePath = getMintDataDirFilePath("datafile-33-kB") + filePath := getMintDataDirFilePath("datafile-33-kB") if filePath == "" { // Make a temp file with 33 KB data. file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest") @@ -4228,7 +4232,7 @@ func testCopyObject() { // Generate 33K of data. bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") + reader := getDataReader("datafile-33-kB") // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -4421,7 +4425,7 @@ func testSSECEncryptedGetObjectReadSeekFunctional() { // Generate 129MiB of data. bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") + reader := getDataReader("datafile-129-MB") defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -4603,7 +4607,7 @@ func testSSES3EncryptedGetObjectReadSeekFunctional() { // Generate 129MiB of data. bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") + reader := getDataReader("datafile-129-MB") defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -4777,7 +4781,7 @@ func testSSECEncryptedGetObjectReadAtFunctional() { // Generate 129MiB of data. bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") + reader := getDataReader("datafile-129-MB") defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -4960,7 +4964,7 @@ func testSSES3EncryptedGetObjectReadAtFunctional() { // Generate 129MiB of data. 
bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") + reader := getDataReader("datafile-129-MB") defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -5972,7 +5976,6 @@ func testFunctional() { "objectName": objectName, } newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -6025,7 +6028,6 @@ func testFunctional() { "expires": 3600 * time.Second, } presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) - if err != nil { logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) return @@ -6089,7 +6091,6 @@ func testFunctional() { "expires": 3600 * time.Second, } presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) - if err != nil { logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) return @@ -6189,7 +6190,6 @@ func testFunctional() { "expires": 3600 * time.Second, } presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second) - if err != nil { logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) return @@ -6513,7 +6513,7 @@ func testPutObjectUploadSeekedObject() { // Seek back to the beginning of the file. tempfile.Seek(0, 0) } - var length = 100 * humanize.KiByte + length := 100 * humanize.KiByte objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) args["objectName"] = objectName @@ -6670,7 +6670,7 @@ func testGetObjectClosedTwiceV2() { // Generate 33K of data. bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") + reader := getDataReader("datafile-33-kB") defer reader.Close() // Save the data @@ -6982,7 +6982,7 @@ func testGetObjectReadSeekFunctionalV2() { // Generate 33K of data. bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") + reader := getDataReader("datafile-33-kB") defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -7136,7 +7136,7 @@ func testGetObjectReadAtFunctionalV2() { // Generate 33K of data. bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") + reader := getDataReader("datafile-33-kB") defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -7303,7 +7303,7 @@ func testCopyObjectV2() { // Generate 33K of data. bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") + reader := getDataReader("datafile-33-kB") defer reader.Close() // Save the data @@ -7412,7 +7412,6 @@ func testComposeObjectErrorCasesWrapper(c *minio.Client) { // Make a new bucket in 'us-east-1' (source bucket). err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { logError(testName, function, args, startTime, "", "MakeBucket failed", err) return @@ -9935,6 +9934,7 @@ func testSSES3EncryptedToSSES3CopyObjectPart() { // Do not need to remove destBucketName its same as bucketName. 
} + func testUserMetadataCopying() { // initialize logging params startTime := time.Now() @@ -10432,7 +10432,7 @@ func testPutObjectNoLengthV2() { args["objectName"] = objectName bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") + reader := getDataReader("datafile-129-MB") defer reader.Close() args["size"] = bufSize @@ -11162,7 +11162,7 @@ func testGetObjectContext() { defer cleanupBucket(bucketName, c) bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") + reader := getDataReader("datafile-33-kB") defer reader.Close() // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -11216,7 +11216,6 @@ func testGetObjectContext() { } successLogger(testName, function, args, startTime).Info() - } // Test get object with FGetObject with a user provided context @@ -11265,7 +11264,7 @@ func testFGetObjectContext() { defer cleanupBucket(bucketName, c) bufSize := dataFileMap["datafile-1-MB"] - var reader = getDataReader("datafile-1-MB") + reader := getDataReader("datafile-1-MB") defer reader.Close() // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -11304,7 +11303,6 @@ func testFGetObjectContext() { } successLogger(testName, function, args, startTime).Info() - } // Test get object with GetObject with a user provided context @@ -11354,7 +11352,7 @@ func testGetObjectRanges() { defer cleanupBucket(bucketName, c) bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") + reader := getDataReader("datafile-129-MB") defer reader.Close() // Save the data objectName := randString(60, rng, "") @@ -11463,7 +11461,7 @@ func testGetObjectACLContext() { defer cleanupBucket(bucketName, c) bufSize := dataFileMap["datafile-1-MB"] - var reader = getDataReader("datafile-1-MB") + reader := getDataReader("datafile-1-MB") defer reader.Close() // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -11525,7 +11523,7 @@ func testGetObjectACLContext() { } bufSize = dataFileMap["datafile-1-MB"] - var reader2 = getDataReader("datafile-1-MB") + reader2 := getDataReader("datafile-1-MB") defer reader2.Close() // Save the data objectName = randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -11635,7 +11633,7 @@ func testPutObjectContextV2() { } defer cleanupBucket(bucketName, c) bufSize := dataFileMap["datatfile-33-kB"] - var reader = getDataReader("datafile-33-kB") + reader := getDataReader("datafile-33-kB") defer reader.Close() objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) @@ -11665,7 +11663,6 @@ func testPutObjectContextV2() { } successLogger(testName, function, args, startTime).Info() - } // Test get object with GetObject with custom context @@ -11713,7 +11710,7 @@ func testGetObjectContextV2() { defer cleanupBucket(bucketName, c) bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") + reader := getDataReader("datafile-33-kB") defer reader.Close() // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -11765,7 +11762,6 @@ func testGetObjectContextV2() { } successLogger(testName, function, args, startTime).Info() - } // Test get object with FGetObject with custom context @@ -11814,7 +11810,7 @@ func testFGetObjectContextV2() { defer cleanupBucket(bucketName, c) bufSize := dataFileMap["datatfile-1-MB"] - var reader = getDataReader("datafile-1-MB") + reader := getDataReader("datafile-1-MB") defer reader.Close() // Save the 
data
 	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
@@ -11855,7 +11851,6 @@ func testFGetObjectContextV2() {
 	}
 
 	successLogger(testName, function, args, startTime).Info()
-
 }
 
 // Test list object v1 and V2
@@ -11915,7 +11910,7 @@ func testListObjects() {
 
 	for i, object := range testObjects {
 		bufSize := dataFileMap["datafile-33-kB"]
-		var reader = getDataReader("datafile-33-kB")
+		reader := getDataReader("datafile-33-kB")
 		defer reader.Close()
 		_, err = c.PutObject(context.Background(), bucketName, object.name, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: object.storageClass})
@@ -12003,7 +11998,7 @@ func testRemoveObjects() {
 	}
 
 	bufSize := dataFileMap["datafile-129-MB"]
-	var reader = getDataReader("datafile-129-MB")
+	reader := getDataReader("datafile-129-MB")
 	defer reader.Close()
 
 	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
index ca6db005..dc3f3cc0 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
@@ -122,7 +122,7 @@ type config struct {
 // returned if it fails to read from the file.
 func loadAlias(filename, alias string) (hostConfig, error) {
 	cfg := &config{}
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 
 	configBytes, err := ioutil.ReadFile(filename)
 	if err != nil {
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
index 485a717e..f7a4af4a 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
@@ -19,6 +19,7 @@ package credentials
 
 import (
 	"bufio"
+	"context"
 	"errors"
 	"fmt"
 	"io/ioutil"
@@ -254,7 +255,10 @@ func getEcsTaskCredentials(client *http.Client, endpoint string, token string) (
 }
 
 func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
-	req, err := http.NewRequest(http.MethodPut, endpoint+tokenPath, nil)
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint+tokenPath, nil)
 	if err != nil {
 		return "", err
 	}
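The `iam_aws.go` change above bounds the IMDSv2 token request with a one-second context instead of an unbounded `http.NewRequest`. A self-contained sketch of the same pattern against a hypothetical metadata endpoint — the URL and header shown are the usual IMDSv2 ones, but treat the whole function as illustrative rather than the library's code:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"time"
)

func fetchToken(client *http.Client, endpoint string) (string, error) {
	// Give the whole request a hard one-second budget, as the vendored change does.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint+"/latest/api/token", nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", "21600")

	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}

func main() {
	token, err := fetchToken(http.DefaultClient, "http://169.254.169.254")
	fmt.Println(token, err)
}
```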
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
index b6712b19..1f106ef7 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
@@ -105,8 +105,8 @@ func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*
 }
 
 func getClientGrantsCredentials(clnt *http.Client, endpoint string,
-	getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (AssumeRoleWithClientGrantsResponse, error) {
-
+	getClientGrantsTokenExpiry func() (*ClientGrantsToken, error),
+) (AssumeRoleWithClientGrantsResponse, error) {
 	accessToken, err := getClientGrantsTokenExpiry()
 	if err != nil {
 		return AssumeRoleWithClientGrantsResponse{}, err
@@ -138,7 +138,6 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string,
 		buf, err := ioutil.ReadAll(resp.Body)
 		if err != nil {
 			return AssumeRoleWithClientGrantsResponse{}, err
-
 		}
 		_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
 		if err != nil {
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
index 39c7892b..586995e8 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
@@ -174,7 +174,6 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) {
 		buf, err := ioutil.ReadAll(resp.Body)
 		if err != nil {
 			return value, err
-
 		}
 		_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
 		if err != nil {
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
index 7f485d63..c7ac4db3 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
@@ -94,7 +94,7 @@ func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, opt
 	if _, err := url.Parse(endpoint); err != nil {
 		return nil, err
 	}
-	var identity = &STSCertificateIdentity{
+	identity := &STSCertificateIdentity{
 		STSEndpoint: endpoint,
 		Client: http.Client{
 			Transport: &http.Transport{
@@ -127,7 +127,7 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) {
 	if err != nil {
 		return Value{}, err
 	}
-	var livetime = i.S3CredentialLivetime
+	livetime := i.S3CredentialLivetime
 	if livetime == 0 {
 		livetime = 1 * time.Hour
 	}
@@ -155,7 +155,6 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) {
 		buf, err := ioutil.ReadAll(resp.Body)
 		if err != nil {
 			return Value{}, err
-
 		}
 		_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
 		if err != nil {
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
index 98f6ea65..19bc3ddf 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
@@ -107,7 +107,8 @@ func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdent
 }
 
 func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string,
-	getWebIDTokenExpiry func() (*WebIdentityToken, error)) (AssumeRoleWithWebIdentityResponse, error) {
+	getWebIDTokenExpiry func() (*WebIdentityToken, error),
+) (AssumeRoleWithWebIdentityResponse, error) {
 	idToken, err := getWebIDTokenExpiry()
 	if err != nil {
 		return AssumeRoleWithWebIdentityResponse{}, err
@@ -156,7 +157,6 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
 		buf, err := ioutil.ReadAll(resp.Body)
 		if err != nil {
 			return AssumeRoleWithWebIdentityResponse{}, err
-
 		}
 		_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
 		if err != nil {
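`STSCertificateIdentity` above wraps an `http.Client` whose transport presents a client certificate to the STS endpoint. As a rough, standard-library-only sketch of how a certificate ends up attached to a transport — the file names are placeholders, and this is not the library's constructor:

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"
	"time"
)

func main() {
	// Placeholder paths; in minio-go the tls.Certificate is supplied by the caller.
	cert, err := tls.LoadX509KeyPair("client.crt", "client.key")
	if err != nil {
		log.Fatal(err)
	}

	client := &http.Client{
		Timeout: 10 * time.Second,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				Certificates: []tls.Certificate{cert},
			},
		},
	}
	_ = client // the real code uses such a client for AssumeRoleWithCertificate
}
```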
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
index ce7d2153..06e68e73 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
@@ -101,7 +101,7 @@ func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) {
 	if context == nil {
 		return kms{key: keyID, hasContext: false}, nil
 	}
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	serializedContext, err := json.Marshal(context)
 	if err != nil {
 		return nil, err
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
index b17e6c54..75a1f609 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
@@ -78,11 +78,13 @@ type Arn struct {
 
 // NewArn creates new ARN based on the given partition, service, region, account id and resource
 func NewArn(partition, service, region, accountID, resource string) Arn {
-	return Arn{Partition: partition,
+	return Arn{
+		Partition: partition,
 		Service:   service,
 		Region:    region,
 		AccountID: accountID,
-		Resource:  resource}
+		Resource:  resource,
+	}
 }
 
 // String returns the string format of the ARN
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
index d0ffbd05..97abf8df 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
@@ -432,7 +432,6 @@ func (c *Config) RemoveRule(opts Options) error {
 	}
 	c.Rules = newRules
 	return nil
-
 }
 
 // Rule - a rule for replication configuration.
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go
index 7b2ca91d..b1296d2b 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go
@@ -114,8 +114,8 @@ func buildChunkHeader(chunkLen int64, signature string) []byte {
 
 // buildChunkSignature - returns chunk signature for a given chunk and previous signature.
 func buildChunkSignature(chunkData []byte, reqTime time.Time, region,
-	previousSignature, secretAccessKey string) string {
-
+	previousSignature, secretAccessKey string,
+) string {
 	chunkStringToSign := buildChunkStringToSign(reqTime, region, previousSignature, chunkData)
 	signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
@@ -200,8 +200,8 @@ func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
 // StreamingSignV4 - provides chunked upload signatureV4 support by
 // implementing io.Reader.
 func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
-	region string, dataLen int64, reqTime time.Time) *http.Request {
-
+	region string, dataLen int64, reqTime time.Time,
+) *http.Request {
 	// Set headers needed for streaming signature.
 	prepareStreamingRequest(req, sessionToken, dataLen, reqTime)
diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go
index 5611770d..f454e675 100644
--- a/vendor/github.com/minio/minio-go/v7/retry.go
+++ b/vendor/github.com/minio/minio-go/v7/retry.go
@@ -110,6 +110,7 @@ func isS3CodeRetryable(s3Code string) (ok bool) {
 // List of HTTP status codes which are retryable.
 var retryableHTTPStatusCodes = map[int]struct{}{
 	429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
+	499: {}, // client closed request, retry. A non-standard status code introduced by nginx.
 	http.StatusInternalServerError: {},
 	http.StatusBadGateway:          {},
 	http.StatusServiceUnavailable:  {},
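The `retry.go` hunk above adds nginx's non-standard 499 ("client closed request") to the set of retryable responses. A tiny sketch of the same lookup-set idiom; the exact membership here is illustrative, not a copy of the library's full list:

```go
package main

import (
	"fmt"
	"net/http"
)

// retryable mirrors the idea of minio-go's retryableHTTPStatusCodes set.
var retryable = map[int]struct{}{
	429:                            {}, // http.StatusTooManyRequests
	499:                            {}, // non-standard nginx "client closed request"
	http.StatusInternalServerError: {},
	http.StatusBadGateway:          {},
	http.StatusServiceUnavailable:  {},
	http.StatusGatewayTimeout:      {},
}

func isHTTPStatusRetryable(code int) bool {
	_, ok := retryable[code]
	return ok
}

func main() {
	fmt.Println(isHTTPStatusRetryable(499)) // true
	fmt.Println(isHTTPStatusRetryable(404)) // false
}
```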
diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go
index 172a04c7..3ebe7b29 100644
--- a/vendor/github.com/minio/minio-go/v7/utils.go
+++ b/vendor/github.com/minio/minio-go/v7/utils.go
@@ -105,21 +105,6 @@ func sumMD5Base64(data []byte) string {
 
 // getEndpointURL - construct a new endpoint.
 func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
-	if strings.Contains(endpoint, ":") {
-		host, _, err := net.SplitHostPort(endpoint)
-		if err != nil {
-			return nil, err
-		}
-		if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
-			msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
-			return nil, errInvalidArgument(msg)
-		}
-	} else {
-		if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) {
-			msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
-			return nil, errInvalidArgument(msg)
-		}
-	}
 	// If secure is false, use 'http' scheme.
 	scheme := "https"
 	if !secure {
@@ -176,12 +161,18 @@ func isValidEndpointURL(endpointURL url.URL) error {
 	if endpointURL.Path != "/" && endpointURL.Path != "" {
 		return errInvalidArgument("Endpoint url cannot have fully qualified paths.")
 	}
-	if strings.Contains(endpointURL.Host, ".s3.amazonaws.com") {
+	host := endpointURL.Hostname()
+	if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
+		msg := "Endpoint: " + endpointURL.Host + " does not follow ip address or domain name standards."
+		return errInvalidArgument(msg)
+	}
+
+	if strings.Contains(host, ".s3.amazonaws.com") {
 		if !s3utils.IsAmazonEndpoint(endpointURL) {
 			return errInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
 		}
 	}
-	if strings.Contains(endpointURL.Host, ".googleapis.com") {
+	if strings.Contains(host, ".googleapis.com") {
 		if !s3utils.IsGoogleEndpoint(endpointURL) {
 			return errInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
 		}
@@ -513,8 +504,10 @@ func isAmzHeader(headerKey string) bool {
 	return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey)
 }
 
-var md5Pool = sync.Pool{New: func() interface{} { return md5.New() }}
-var sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }}
+var (
+	md5Pool    = sync.Pool{New: func() interface{} { return md5.New() }}
+	sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }}
+)
 
 func newMd5Hasher() md5simd.Hasher {
 	return hashWrapper{Hash: md5Pool.Get().(hash.Hash), isMD5: true}
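Both the `bucket-cache.go` and `utils.go` changes deal with hosts that may carry a port or IPv6 brackets: `url.URL.Hostname()` strips the port and the brackets, which the new endpoint validation relies on, while a bare IPv6 literal has to be re-bracketed before going back into a Host field. A small standard-library sketch of those two behaviors (the bracket check here uses `To4() == nil`, a simplification rather than the library's exact condition):

```go
package main

import (
	"fmt"
	"net"
	"net/url"
)

func main() {
	u, err := url.Parse("https://[2001:db8::1]:443/bucket")
	if err != nil {
		panic(err)
	}

	// Host keeps brackets and port; Hostname() strips both.
	fmt.Println(u.Host)       // [2001:db8::1]:443
	fmt.Println(u.Hostname()) // 2001:db8::1

	// Putting a bare IPv6 literal back into a Host field requires brackets again,
	// which is what the bucket-cache change does after dropping a default port.
	host := u.Hostname()
	if ip := net.ParseIP(host); ip != nil && ip.To4() == nil {
		host = "[" + host + "]"
	}
	fmt.Println(host) // [2001:db8::1]
}
```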