path: root/vendor/github.com/minio/minio-go/v7/utils.go
/*
 * MinIO Go Library for Amazon S3 Compatible Cloud Storage
 * Copyright 2015-2017 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"crypto/md5"
	"encoding/base64"
	"encoding/hex"
	"encoding/xml"
	"fmt"
	"hash"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	md5simd "github.com/minio/md5-simd"
	"github.com/minio/minio-go/v7/pkg/s3utils"
	"github.com/minio/sha256-simd"
)

func trimEtag(etag string) string {
	etag = strings.TrimPrefix(etag, "\"")
	return strings.TrimSuffix(etag, "\"")
}

var expirationRegex = regexp.MustCompile(`expiry-date="(.*?)", rule-id="(.*?)"`)

func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) {
	if matches := expirationRegex.FindStringSubmatch(expiration); len(matches) == 3 {
		expTime, err := time.Parse(http.TimeFormat, matches[1])
		if err != nil {
			return time.Time{}, ""
		}
		return expTime, matches[2]
	}
	return time.Time{}, ""
}
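
// Parsing sketch (the header value below is fabricated for illustration):
// the x-amz-expiration header carries both values in a single string,
//
//	hdr := `expiry-date="Fri, 21 Dec 2012 00:00:00 GMT", rule-id="rule-1"`
//	expTime, ruleID := amzExpirationToExpiryDateRuleID(hdr)
//	// expTime == 2012-12-21 00:00:00 +0000 UTC, ruleID == "rule-1"
//
// Any value that does not match the regex above yields a zero time and an
// empty rule ID.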

// xmlDecoder provides a decoded value from the XML body.
func xmlDecoder(body io.Reader, v interface{}) error {
	d := xml.NewDecoder(body)
	return d.Decode(v)
}

// sum256Hex calculates the sha256sum of an input byte array and returns it hex encoded.
func sum256Hex(data []byte) string {
	hash := newSHA256Hasher()
	defer hash.Close()
	hash.Write(data)
	return hex.EncodeToString(hash.Sum(nil))
}

// sumMD5Base64 calculates the md5sum of an input byte array and returns it base64 encoded.
func sumMD5Base64(data []byte) string {
	hash := newMd5Hasher()
	defer hash.Close()
	hash.Write(data)
	return base64.StdEncoding.EncodeToString(hash.Sum(nil))
}

// getEndpointURL - construct a new endpoint.
func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
	if strings.Contains(endpoint, ":") {
		host, _, err := net.SplitHostPort(endpoint)
		if err != nil {
			return nil, err
		}
		if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
			msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
			return nil, errInvalidArgument(msg)
		}
	} else {
		if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) {
			msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
			return nil, errInvalidArgument(msg)
		}
	}
	// If secure is false, use 'http' scheme.
	scheme := "https"
	if !secure {
		scheme = "http"
	}

	// Construct a secured endpoint URL.
	endpointURLStr := scheme + "://" + endpoint
	endpointURL, err := url.Parse(endpointURLStr)
	if err != nil {
		return nil, err
	}

	// Validate incoming endpoint URL.
	if err := isValidEndpointURL(*endpointURL); err != nil {
		return nil, err
	}
	return endpointURL, nil
}
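
// Usage sketch (the endpoint below is hypothetical and only for illustration):
//
//	u, err := getEndpointURL("play.min.io:9000", true)
//	if err != nil {
//		// the endpoint is neither a valid IP nor a valid domain name
//	}
//	// u.String() == "https://play.min.io:9000"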

// closeResponse closes a non-nil response along with any response Body.
// It is a convenient wrapper to drain any remaining data on the response body.
//
// This subsequently allows the Go http RoundTripper
// to re-use the same connection for future requests.
func closeResponse(resp *http.Response) {
	// Callers should close resp.Body when done reading from it.
	// If resp.Body is not closed, the Client's underlying RoundTripper
	// (typically Transport) may not be able to re-use a persistent TCP
	// connection to the server for a subsequent "keep-alive" request.
	if resp != nil && resp.Body != nil {
		// Drain any remaining Body and then close the connection.
		// Without draining, closing the body alone would prevent the
		// connection from being re-used for future requests.
		//  - http://stackoverflow.com/a/17961593/4465767
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
	}
}
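
// Typical call pattern (sketch, not part of the upstream file; `client` and
// `req` are an assumed *http.Client and *http.Request): pair every request
// with closeResponse so the Transport can keep the connection alive,
//
//	resp, err := client.Do(req)
//	if err != nil {
//		return err
//	}
//	defer closeResponse(resp)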

var (
	// Hex encoded string of the sha256 sum of zero bytes (the empty payload).
	emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

	// Sentinel URL is the default url value which is invalid.
	sentinelURL = url.URL{}
)

// Verify if input endpoint URL is valid.
func isValidEndpointURL(endpointURL url.URL) error {
	if endpointURL == sentinelURL {
		return errInvalidArgument("Endpoint url cannot be empty.")
	}
	if endpointURL.Path != "/" && endpointURL.Path != "" {
		return errInvalidArgument("Endpoint url cannot have fully qualified paths.")
	}
	if strings.Contains(endpointURL.Host, ".s3.amazonaws.com") {
		if !s3utils.IsAmazonEndpoint(endpointURL) {
			return errInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
		}
	}
	if strings.Contains(endpointURL.Host, ".googleapis.com") {
		if !s3utils.IsGoogleEndpoint(endpointURL) {
			return errInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
		}
	}
	return nil
}
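
// Validation sketch (illustrative values):
//
//	isValidEndpointURL(url.URL{})                                  // error: empty endpoint
//	isValidEndpointURL(url.URL{Host: "example.com"})               // nil
//	isValidEndpointURL(url.URL{Host: "example.com", Path: "/bkt"}) // error: path not allowed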

// Verify if input expires value is valid.
func isValidExpiry(expires time.Duration) error {
	expireSeconds := int64(expires / time.Second)
	if expireSeconds < 1 {
		return errInvalidArgument("Expires cannot be lesser than 1 second.")
	}
	if expireSeconds > 604800 {
		return errInvalidArgument("Expires cannot be greater than 7 days.")
	}
	return nil
}
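
// In other words, presigned request expiries must fall within [1 second, 7 days];
// for example (sketch), isValidExpiry(time.Hour) returns nil while
// isValidExpiry(8*24*time.Hour) returns an error.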

// extractObjMetadata extracts only the necessary metadata header key/values
// by filtering against a list of preserved header keys and prefixes.
func extractObjMetadata(header http.Header) http.Header {
	preserveKeys := []string{
		"Content-Type",
		"Cache-Control",
		"Content-Encoding",
		"Content-Language",
		"Content-Disposition",
		"X-Amz-Storage-Class",
		"X-Amz-Object-Lock-Mode",
		"X-Amz-Object-Lock-Retain-Until-Date",
		"X-Amz-Object-Lock-Legal-Hold",
		"X-Amz-Website-Redirect-Location",
		"X-Amz-Server-Side-Encryption",
		"X-Amz-Tagging-Count",
		"X-Amz-Meta-",
		// Add new headers to be preserved.
		// if you add new headers here, please extend
		// PutObjectOptions{} to preserve them
		// upon upload as well.
	}
	filteredHeader := make(http.Header)
	for k, v := range header {
		var found bool
		for _, prefix := range preserveKeys {
			if !strings.HasPrefix(k, prefix) {
				continue
			}
			found = true
			break
		}
		if found {
			filteredHeader[k] = v
		}
	}
	return filteredHeader
}
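
// Filtering sketch (header names are illustrative): given a response header
// containing
//
//	Content-Type: text/plain
//	X-Amz-Meta-Color: blue
//	Accept-Ranges: bytes
//
// the returned http.Header keeps "Content-Type" and "X-Amz-Meta-Color"
// (matched by the "X-Amz-Meta-" prefix) and drops "Accept-Ranges", which
// matches none of the preserved keys or prefixes.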

// ToObjectInfo converts http header values into ObjectInfo type,
// extracts metadata and fills in all the necessary fields in ObjectInfo.
func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectInfo, error) {
	var err error
	// Trim off the surrounding double quotes from the ETag.
	etag := trimEtag(h.Get("ETag"))

	// Parse content length if it exists.
	var size int64 = -1
	contentLengthStr := h.Get("Content-Length")
	if contentLengthStr != "" {
		size, err = strconv.ParseInt(contentLengthStr, 10, 64)
		if err != nil {
			// Content-Length is not valid
			return ObjectInfo{}, ErrorResponse{
				Code:       "InternalError",
				Message:    fmt.Sprintf("Content-Length is not an integer, failed with %v", err),
				BucketName: bucketName,
				Key:        objectName,
				RequestID:  h.Get("x-amz-request-id"),
				HostID:     h.Get("x-amz-id-2"),
				Region:     h.Get("x-amz-bucket-region"),
			}
		}
	}

	// Parse Last-Modified, which has the HTTP time format.
	date, err := time.Parse(http.TimeFormat, h.Get("Last-Modified"))
	if err != nil {
		return ObjectInfo{}, ErrorResponse{
			Code:       "InternalError",
			Message:    fmt.Sprintf("Last-Modified time format is invalid, failed with %v", err),
			BucketName: bucketName,
			Key:        objectName,
			RequestID:  h.Get("x-amz-request-id"),
			HostID:     h.Get("x-amz-id-2"),
			Region:     h.Get("x-amz-bucket-region"),
		}
	}

	// Fetch content type if any present.
	contentType := strings.TrimSpace(h.Get("Content-Type"))
	if contentType == "" {
		contentType = "application/octet-stream"
	}

	expiryStr := h.Get("Expires")
	var expiry time.Time
	if expiryStr != "" {
		expiry, _ = time.Parse(http.TimeFormat, expiryStr)
	}

	metadata := extractObjMetadata(h)
	userMetadata := make(map[string]string)
	for k, v := range metadata {
		if strings.HasPrefix(k, "X-Amz-Meta-") {
			userMetadata[strings.TrimPrefix(k, "X-Amz-Meta-")] = v[0]
		}
	}
	userTags := s3utils.TagDecode(h.Get(amzTaggingHeader))

	var tagCount int
	if count := h.Get(amzTaggingCount); count != "" {
		tagCount, err = strconv.Atoi(count)
		if err != nil {
			return ObjectInfo{}, ErrorResponse{
				Code:       "InternalError",
				Message:    fmt.Sprintf("x-amz-tagging-count is not an integer, failed with %v", err),
				BucketName: bucketName,
				Key:        objectName,
				RequestID:  h.Get("x-amz-request-id"),
				HostID:     h.Get("x-amz-id-2"),
				Region:     h.Get("x-amz-bucket-region"),
			}
		}
	}

	// extract lifecycle expiry date and rule ID
	expTime, ruleID := amzExpirationToExpiryDateRuleID(h.Get(amzExpiration))

	deleteMarker := h.Get(amzDeleteMarker) == "true"

	// Save object metadata info.
	return ObjectInfo{
		ETag:              etag,
		Key:               objectName,
		Size:              size,
		LastModified:      date,
		ContentType:       contentType,
		Expires:           expiry,
		VersionID:         h.Get(amzVersionID),
		IsDeleteMarker:    deleteMarker,
		ReplicationStatus: h.Get(amzReplicationStatus),
		Expiration:        expTime,
		ExpirationRuleID:  ruleID,
		// Extract only the relevant header keys describing the object.
		// extractObjMetadata above filters out the standard set of headers
		// that are not part of the object metadata.
		Metadata:     metadata,
		UserMetadata: userMetadata,
		UserTags:     userTags,
		UserTagCount: tagCount,
	}, nil
}
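
// Usage sketch (illustrative; `resp` is assumed to be the *http.Response of a
// HEAD request against the object):
//
//	objInfo, err := ToObjectInfo("mybucket", "myobject", resp.Header)
//	if err != nil {
//		// the server returned a malformed Content-Length, Last-Modified,
//		// or x-amz-tagging-count header
//	}
//	_ = objInfo.Size // parsed from Content-Length, -1 when the header is absent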

var readFull = func(r io.Reader, buf []byte) (n int, err error) {
	// ReadFull reads exactly len(buf) bytes from r into buf.
	// It returns the number of bytes copied and an error if
	// fewer bytes were read. The error is EOF only if no bytes
	// were read. If an EOF happens after reading some but not
	// all the bytes, ReadFull returns ErrUnexpectedEOF.
	// On return, n == len(buf) if and only if err == nil.
	// If r returns an error having read at least len(buf) bytes,
	// the error is dropped.
	for n < len(buf) && err == nil {
		var nn int
		nn, err = r.Read(buf[n:])
		// Some spurious io.Readers return
		// io.ErrUnexpectedEOF when nn == 0.
		// This behavior is undocumented, so
		// rather than relying on io.ReadFull
		// (which would push custom handling
		// onto callers), we modify the original
		// io.ReadFull implementation to treat
		// io.ErrUnexpectedEOF with nn == 0
		// as io.EOF.
		if err == io.ErrUnexpectedEOF && nn == 0 {
			err = io.EOF
		}
		n += nn
	}
	if n >= len(buf) {
		err = nil
	} else if n > 0 && err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	return
}
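
// Behavioral sketch: for a reader that yields 3 bytes and then io.EOF,
//
//	buf := make([]byte, 8)
//	n, err := readFull(strings.NewReader("abc"), buf)
//	// n == 3, err == io.ErrUnexpectedEOF (short read), as with io.ReadFull
//
// The only intended difference from io.ReadFull is that io.ErrUnexpectedEOF
// returned together with zero bytes read is normalized to io.EOF.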

// regCred matches credential string in HTTP header
var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")

// regSign matches signature string in HTTP header
var regSign = regexp.MustCompile("Signature=([0-9a-f]+)")

// Redact out signature value from authorization string.
func redactSignature(origAuth string) string {
	if !strings.HasPrefix(origAuth, signV4Algorithm) {
		// Set a temporary redacted auth
		return "AWS **REDACTED**:**REDACTED**"
	}

	// Signature V4 authorization header.

	// Strip out accessKeyID from:
	// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
	newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")

	// Strip out 256-bit signature from: Signature=<256-bit signature>
	return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
}
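
// Redaction sketch (the header value is fabricated): a Signature V4
// authorization value such as
//
//	AWS4-HMAC-SHA256 Credential=AKIAEXAMPLE/20200101/us-east-1/s3/aws4_request, Signature=abc123def456
//
// becomes
//
//	AWS4-HMAC-SHA256 Credential=**REDACTED**/20200101/us-east-1/s3/aws4_request, Signature=**REDACTED**
//
// while any non-V4 authorization value is replaced wholesale with
// "AWS **REDACTED**:**REDACTED**".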

// getDefaultLocation returns the location based on the input
// URL `u`. If a region override is provided, the location
// always defaults to regionOverride.
//
// If no other cases match, the location is set to `us-east-1`
// as a last resort.
func getDefaultLocation(u url.URL, regionOverride string) (location string) {
	if regionOverride != "" {
		return regionOverride
	}
	region := s3utils.GetRegionFromURL(u)
	if region == "" {
		region = "us-east-1"
	}
	return region
}
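
// Resolution sketch (hosts are illustrative, and the first result assumes
// s3utils.GetRegionFromURL recognizes the regional endpoint):
//
//	getDefaultLocation(url.URL{Host: "s3.eu-west-1.amazonaws.com"}, "")  // "eu-west-1"
//	getDefaultLocation(url.URL{Host: "localhost:9000"}, "")              // "us-east-1" fallback
//	getDefaultLocation(url.URL{Host: "localhost:9000"}, "ap-south-1")    // override wins: "ap-south-1"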

var supportedHeaders = []string{
	"content-type",
	"cache-control",
	"content-encoding",
	"content-disposition",
	"content-language",
	"x-amz-website-redirect-location",
	"x-amz-object-lock-mode",
	"x-amz-metadata-directive",
	"x-amz-object-lock-retain-until-date",
	"expires",
	"x-amz-replication-status",
	// Add more supported headers here.
}

// isStorageClassHeader returns true if the header is a supported storage class header
func isStorageClassHeader(headerKey string) bool {
	return strings.EqualFold(amzStorageClass, headerKey)
}

// isStandardHeader returns true if the header is a supported header and not a custom header.
func isStandardHeader(headerKey string) bool {
	key := strings.ToLower(headerKey)
	for _, header := range supportedHeaders {
		if strings.ToLower(header) == key {
			return true
		}
	}
	return false
}

// sseHeaders is the list of server-side encryption headers.
var sseHeaders = []string{
	"x-amz-server-side-encryption",
	"x-amz-server-side-encryption-aws-kms-key-id",
	"x-amz-server-side-encryption-context",
	"x-amz-server-side-encryption-customer-algorithm",
	"x-amz-server-side-encryption-customer-key",
	"x-amz-server-side-encryption-customer-key-MD5",
}

// isSSEHeader returns true if header is a server side encryption header.
func isSSEHeader(headerKey string) bool {
	key := strings.ToLower(headerKey)
	for _, h := range sseHeaders {
		if strings.ToLower(h) == key {
			return true
		}
	}
	return false
}

// isAmzHeader returns true if the header is an x-amz-meta-*, x-amz-grant-*,
// x-amz-acl or server-side encryption header.
func isAmzHeader(headerKey string) bool {
	key := strings.ToLower(headerKey)

	return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey)
}

var md5Pool = sync.Pool{New: func() interface{} { return md5.New() }}
var sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }}

func newMd5Hasher() md5simd.Hasher {
	return hashWrapper{Hash: md5Pool.New().(hash.Hash), isMD5: true}
}

func newSHA256Hasher() md5simd.Hasher {
	return hashWrapper{Hash: sha256Pool.New().(hash.Hash), isSHA256: true}
}

// hashWrapper implements the md5simd.Hasher interface.
type hashWrapper struct {
	hash.Hash
	isMD5    bool
	isSHA256 bool
}

// Close will put the hasher back into the pool.
func (m hashWrapper) Close() {
	if m.isMD5 && m.Hash != nil {
		m.Reset()
		md5Pool.Put(m.Hash)
	}
	if m.isSHA256 && m.Hash != nil {
		m.Reset()
		sha256Pool.Put(m.Hash)
	}
	m.Hash = nil
}
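
// Pooled-hasher usage sketch (mirrors sum256Hex above): write, sum, then Close
// to return the underlying hash.Hash to its pool for reuse.
//
//	h := newSHA256Hasher()
//	h.Write(data)
//	sum := h.Sum(nil)
//	h.Close()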