From 20f6c05ec50739d31f4dbe9fde0d223f2c43f6e8 Mon Sep 17 00:00:00 2001 From: Wim Date: Sat, 16 Oct 2021 23:11:32 +0200 Subject: Update vendor --- vendor/github.com/minio/minio-go/v7/.gitignore | 4 + vendor/github.com/minio/minio-go/v7/.golangci.yml | 16 + vendor/github.com/minio/minio-go/v7/CNAME | 1 + .../github.com/minio/minio-go/v7/CONTRIBUTING.md | 23 + vendor/github.com/minio/minio-go/v7/LICENSE | 202 + vendor/github.com/minio/minio-go/v7/MAINTAINERS.md | 35 + vendor/github.com/minio/minio-go/v7/Makefile | 35 + vendor/github.com/minio/minio-go/v7/NOTICE | 9 + vendor/github.com/minio/minio-go/v7/README.md | 251 + .../github.com/minio/minio-go/v7/README_zh_CN.md | 260 + .../minio/minio-go/v7/api-bucket-encryption.go | 134 + .../minio/minio-go/v7/api-bucket-lifecycle.go | 147 + .../minio/minio-go/v7/api-bucket-notification.go | 255 + .../minio/minio-go/v7/api-bucket-policy.go | 142 + .../minio/minio-go/v7/api-bucket-replication.go | 228 + .../minio/minio-go/v7/api-bucket-tagging.go | 135 + .../minio/minio-go/v7/api-bucket-versioning.go | 137 + .../minio/minio-go/v7/api-compose-object.go | 580 + .../minio/minio-go/v7/api-copy-object.go | 77 + .../github.com/minio/minio-go/v7/api-datatypes.go | 173 + .../minio/minio-go/v7/api-error-response.go | 271 + .../minio/minio-go/v7/api-get-object-acl.go | 140 + .../minio/minio-go/v7/api-get-object-file.go | 127 + .../github.com/minio/minio-go/v7/api-get-object.go | 681 ++ .../minio/minio-go/v7/api-get-options.go | 140 + vendor/github.com/minio/minio-go/v7/api-list.go | 950 ++ .../minio/minio-go/v7/api-object-legal-hold.go | 176 + .../minio/minio-go/v7/api-object-lock.go | 241 + .../minio/minio-go/v7/api-object-retention.go | 165 + .../minio/minio-go/v7/api-object-tagging.go | 157 + .../github.com/minio/minio-go/v7/api-presigned.go | 216 + .../github.com/minio/minio-go/v7/api-put-bucket.go | 123 + .../minio/minio-go/v7/api-put-object-common.go | 148 + .../minio-go/v7/api-put-object-file-context.go | 64 + .../minio/minio-go/v7/api-put-object-multipart.go | 393 + .../minio/minio-go/v7/api-put-object-streaming.go | 487 + .../github.com/minio/minio-go/v7/api-put-object.go | 370 + vendor/github.com/minio/minio-go/v7/api-remove.go | 419 + .../minio/minio-go/v7/api-s3-datatypes.go | 361 + vendor/github.com/minio/minio-go/v7/api-select.go | 751 ++ vendor/github.com/minio/minio-go/v7/api-stat.go | 127 + vendor/github.com/minio/minio-go/v7/api.go | 896 ++ .../github.com/minio/minio-go/v7/bucket-cache.go | 253 + .../minio/minio-go/v7/code_of_conduct.md | 80 + vendor/github.com/minio/minio-go/v7/constants.go | 92 + vendor/github.com/minio/minio-go/v7/core.go | 133 + .../minio/minio-go/v7/functional_tests.go | 11502 +++++++++++++++++++ vendor/github.com/minio/minio-go/v7/go.mod | 27 + vendor/github.com/minio/minio-go/v7/go.sum | 76 + vendor/github.com/minio/minio-go/v7/hook-reader.go | 85 + .../minio-go/v7/pkg/credentials/assume_role.go | 214 + .../minio/minio-go/v7/pkg/credentials/chain.go | 89 + .../minio-go/v7/pkg/credentials/config.json.sample | 17 + .../minio-go/v7/pkg/credentials/credentials.go | 182 + .../minio-go/v7/pkg/credentials/credentials.sample | 12 + .../minio/minio-go/v7/pkg/credentials/doc.go | 62 + .../minio/minio-go/v7/pkg/credentials/env_aws.go | 71 + .../minio/minio-go/v7/pkg/credentials/env_minio.go | 68 + .../v7/pkg/credentials/file_aws_credentials.go | 120 + .../v7/pkg/credentials/file_minio_client.go | 135 + .../minio/minio-go/v7/pkg/credentials/iam_aws.go | 367 + .../minio-go/v7/pkg/credentials/signature-type.go | 77 + 
.../minio/minio-go/v7/pkg/credentials/static.go | 67 + .../v7/pkg/credentials/sts_client_grants.go | 162 + .../v7/pkg/credentials/sts_ldap_identity.go | 124 + .../v7/pkg/credentials/sts_web_identity.go | 181 + .../minio/minio-go/v7/pkg/encrypt/server-side.go | 198 + .../minio/minio-go/v7/pkg/lifecycle/lifecycle.go | 303 + .../minio/minio-go/v7/pkg/notification/info.go | 78 + .../minio-go/v7/pkg/notification/notification.go | 395 + .../minio-go/v7/pkg/replication/replication.go | 696 ++ .../minio/minio-go/v7/pkg/s3utils/utils.go | 391 + .../minio/minio-go/v7/pkg/set/stringset.go | 200 + .../v7/pkg/signer/request-signature-streaming.go | 306 + .../minio-go/v7/pkg/signer/request-signature-v2.go | 317 + .../minio-go/v7/pkg/signer/request-signature-v4.go | 318 + .../minio/minio-go/v7/pkg/signer/utils.go | 59 + vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go | 66 + .../github.com/minio/minio-go/v7/pkg/tags/tags.go | 341 + vendor/github.com/minio/minio-go/v7/post-policy.go | 327 + .../minio/minio-go/v7/retry-continous.go | 69 + vendor/github.com/minio/minio-go/v7/retry.go | 124 + .../github.com/minio/minio-go/v7/s3-endpoints.go | 57 + vendor/github.com/minio/minio-go/v7/s3-error.go | 61 + vendor/github.com/minio/minio-go/v7/transport.go | 83 + vendor/github.com/minio/minio-go/v7/utils.go | 488 + 86 files changed, 29320 insertions(+) create mode 100644 vendor/github.com/minio/minio-go/v7/.gitignore create mode 100644 vendor/github.com/minio/minio-go/v7/.golangci.yml create mode 100644 vendor/github.com/minio/minio-go/v7/CNAME create mode 100644 vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md create mode 100644 vendor/github.com/minio/minio-go/v7/LICENSE create mode 100644 vendor/github.com/minio/minio-go/v7/MAINTAINERS.md create mode 100644 vendor/github.com/minio/minio-go/v7/Makefile create mode 100644 vendor/github.com/minio/minio-go/v7/NOTICE create mode 100644 vendor/github.com/minio/minio-go/v7/README.md create mode 100644 vendor/github.com/minio/minio-go/v7/README_zh_CN.md create mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-notification.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-policy.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-replication.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-compose-object.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-copy-object.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-datatypes.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-error-response.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-get-object-acl.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-get-object-file.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-get-object.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-get-options.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-list.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-object-lock.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-object-retention.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-object-tagging.go create 
mode 100644 vendor/github.com/minio/minio-go/v7/api-presigned.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-put-bucket.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-put-object-common.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-put-object.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-remove.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-select.go create mode 100644 vendor/github.com/minio/minio-go/v7/api-stat.go create mode 100644 vendor/github.com/minio/minio-go/v7/api.go create mode 100644 vendor/github.com/minio/minio-go/v7/bucket-cache.go create mode 100644 vendor/github.com/minio/minio-go/v7/code_of_conduct.md create mode 100644 vendor/github.com/minio/minio-go/v7/constants.go create mode 100644 vendor/github.com/minio/minio-go/v7/core.go create mode 100644 vendor/github.com/minio/minio-go/v7/functional_tests.go create mode 100644 vendor/github.com/minio/minio-go/v7/go.mod create mode 100644 vendor/github.com/minio/minio-go/v7/go.sum create mode 100644 vendor/github.com/minio/minio-go/v7/hook-reader.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/signature-type.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/notification/info.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go create mode 100644 
vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go
 create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
 create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go
 create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
 create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
 create mode 100644 vendor/github.com/minio/minio-go/v7/post-policy.go
 create mode 100644 vendor/github.com/minio/minio-go/v7/retry-continous.go
 create mode 100644 vendor/github.com/minio/minio-go/v7/retry.go
 create mode 100644 vendor/github.com/minio/minio-go/v7/s3-endpoints.go
 create mode 100644 vendor/github.com/minio/minio-go/v7/s3-error.go
 create mode 100644 vendor/github.com/minio/minio-go/v7/transport.go
 create mode 100644 vendor/github.com/minio/minio-go/v7/utils.go

diff --git a/vendor/github.com/minio/minio-go/v7/.gitignore b/vendor/github.com/minio/minio-go/v7/.gitignore
new file mode 100644
index 00000000..8081bd0f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/.gitignore
@@ -0,0 +1,4 @@
+*~
+*.test
+validator
+golangci-lint
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/v7/.golangci.yml b/vendor/github.com/minio/minio-go/v7/.golangci.yml
new file mode 100644
index 00000000..7d1dd335
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/.golangci.yml
@@ -0,0 +1,16 @@
+linters-settings:
+  misspell:
+    locale: US
+
+linters:
+  disable-all: true
+  enable:
+    - typecheck
+    - goimports
+    - misspell
+    - govet
+    - golint
+    - ineffassign
+    - gosimple
+    - deadcode
+    - structcheck
diff --git a/vendor/github.com/minio/minio-go/v7/CNAME b/vendor/github.com/minio/minio-go/v7/CNAME
new file mode 100644
index 00000000..d365a7bb
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/CNAME
@@ -0,0 +1 @@
+minio-go.min.io
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md
new file mode 100644
index 00000000..8b1ee86c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md
@@ -0,0 +1,23 @@
+
+### Developer Guidelines
+
+``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following:
+
+* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes.
+    - Fork it
+    - Create your feature branch (git checkout -b my-new-feature)
+    - Commit your changes (git commit -am 'Add some feature')
+    - Push to the branch (git push origin my-new-feature)
+    - Create a new Pull Request
+
+* When you're ready to create a pull request, be sure to:
+    - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
+    - Run `go fmt`
+    - Squash your commits into a single commit. `git rebase -i`. It's okay to force-update your pull request.
+    - Make sure `go test -race ./...` and `go build` complete.
+      NOTE: go test runs functional tests and requires you to have an AWS S3 account. Set them as environment variables
+      ``ACCESS_KEY`` and ``SECRET_KEY``. To run a shorter version of the tests, please use ``go test -short -race ./...``
+
+* Read the [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) page from the Go project
+    - The `minio-go` project strictly conforms to Go style
+    - If you happen to observe offending code, please feel free to send a pull request
diff --git a/vendor/github.com/minio/minio-go/v7/LICENSE b/vendor/github.com/minio/minio-go/v7/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md b/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md
new file mode 100644
index 00000000..f640dfb9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md
@@ -0,0 +1,35 @@
+# For maintainers only
+
+## Responsibilities
+
+Please go through this link: [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
+
+### Making new releases
+Tag and sign your release commit. Additionally, this step requires you to have access to MinIO's trusted private key.
+```sh
+$ export GNUPGHOME=/media/${USER}/minio/trusted
+$ git tag -s 4.0.0
+$ git push
+$ git push --tags
+```
+
+### Update version
+Once the release has been made, update the `libraryVersion` constant in `api.go` to the next release version.
+
+```sh
+$ grep libraryVersion api.go
+	libraryVersion = "4.0.1"
+```
+
+Commit your changes:
+```
+$ git commit -a -m "Update version for next release" --author "MinIO Trusted <trusted@min.io>"
+```
+
+### Announce
+Announce the new release by adding release notes at https://github.com/minio/minio-go/releases from the `trusted@min.io` account. Release notes require two sections, `highlights` and `changelog`. Highlights is a bulleted list of salient features in this release, and Changelog contains a list of all commits since the last release.
+
+To generate the `changelog`:
+```sh
+$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' <last_release_tag>..<latest_release_tag>
+```
diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile
new file mode 100644
index 00000000..ae640c4d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/Makefile
@@ -0,0 +1,35 @@
+GOPATH := $(shell go env GOPATH)
+TMPDIR := $(shell mktemp -d)
+
+all: checks
+
+.PHONY: examples docs
+
+checks: lint vet test examples functional-test
+
+lint:
+	@mkdir -p ${GOPATH}/bin
+	@which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0)
+	@echo "Running $@ check"
+	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
+	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
+
+vet:
+	@GO111MODULE=on go vet ./...
+
+test:
+	@GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
+
+examples:
+	@echo "Building s3 examples"
+	@cd ./examples/s3 && $(foreach v,$(wildcard examples/s3/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;)
+	@echo "Building minio examples"
+	@cd ./examples/minio && $(foreach v,$(wildcard examples/minio/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;)
+
+functional-test:
+	@GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go
+
+clean:
+	@echo "Cleaning up all the generated files"
+	@find . -name '*.test' | xargs rm -fv
+	@find . -name '*~' | xargs rm -fv
diff --git a/vendor/github.com/minio/minio-go/v7/NOTICE b/vendor/github.com/minio/minio-go/v7/NOTICE
new file mode 100644
index 00000000..1e8fd3b9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/NOTICE
@@ -0,0 +1,9 @@
+MinIO Cloud Storage, (C) 2014-2020 MinIO, Inc.
+
+This product includes software developed at MinIO, Inc.
+(https://min.io/).
+
+The MinIO project contains unmodified/modified subcomponents too with
+separate copyright notices and license terms. Your use of the source
+code for these subcomponents is subject to the terms and conditions
+of the Apache License, Version 2.0.
diff --git a/vendor/github.com/minio/minio-go/v7/README.md b/vendor/github.com/minio/minio-go/v7/README.md
new file mode 100644
index 00000000..b5c26d53
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/README.md
@@ -0,0 +1,251 @@
+# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
+
+The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
+
+This quickstart guide shows you how to install the MinIO client SDK, connect to MinIO, and build a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference).
+
+This document assumes that you have a working [Go development environment](https://golang.org/doc/install).
+
+## Download from GitHub
+```sh
+GO111MODULE=on go get github.com/minio/minio-go/v7
+```
+
+## Initialize MinIO Client
+The MinIO client requires the following parameters to connect to an Amazon S3 compatible object storage.
+
+| Parameter | Description |
+| :--- | :--- |
+| endpoint | URL to object storage service. |
+| _minio.Options_ | All the options, such as credentials and custom transport. |
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+func main() {
+	endpoint := "play.min.io"
+	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+	useSSL := true
+
+	// Initialize minio client object.
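+	// minio.Options bundles the connection settings: NewStaticV4 wraps the
+	// fixed access/secret key pair in a Signature V4 credentials provider,
+	// and Secure selects HTTPS.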
+ minioClient, err := minio.New(endpoint, &minio.Options{ + Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), + Secure: useSSL, + }) + if err != nil { + log.Fatalln(err) + } + + log.Printf("%#v\n", minioClient) // minioClient is now set up +} +``` + +## Quick Start Example - File Uploader +This example program connects to an object storage server, creates a bucket and uploads a file to the bucket. + +We will use the MinIO server running at [https://play.min.io](https://play.min.io) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public. + +### FileUploader.go +```go +package main + +import ( + "context" + "log" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func main() { + ctx := context.Background() + endpoint := "play.min.io" + accessKeyID := "Q3AM3UQ867SPQQA43P2F" + secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" + useSSL := true + + // Initialize minio client object. + minioClient, err := minio.New(endpoint, &minio.Options{ + Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), + Secure: useSSL, + }) + if err != nil { + log.Fatalln(err) + } + + // Make a new bucket called mymusic. + bucketName := "mymusic" + location := "us-east-1" + + err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location}) + if err != nil { + // Check to see if we already own this bucket (which happens if you run this twice) + exists, errBucketExists := minioClient.BucketExists(ctx, bucketName) + if errBucketExists == nil && exists { + log.Printf("We already own %s\n", bucketName) + } else { + log.Fatalln(err) + } + } else { + log.Printf("Successfully created %s\n", bucketName) + } + + // Upload the zip file + objectName := "golden-oldies.zip" + filePath := "/tmp/golden-oldies.zip" + contentType := "application/zip" + + // Upload the zip file with FPutObject + info, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType}) + if err != nil { + log.Fatalln(err) + } + + log.Printf("Successfully uploaded %s of size %d\n", objectName, info.Size) +} +``` + +### Run FileUploader +```sh +export GO111MODULE=on +go run file-uploader.go +2016/08/13 17:03:28 Successfully created mymusic +2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413 + +mc ls play/mymusic/ +[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip +``` + +## API Reference +The full API Reference is available here. 
+ +* [Complete API Reference](https://docs.min.io/docs/golang-client-api-reference) + +### API Reference : Bucket Operations +* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket) +* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets) +* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists) +* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket) +* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects) +* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads) + +### API Reference : Bucket policy Operations +* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy) +* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy) + +### API Reference : Bucket notification Operations +* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification) +* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification) +* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification) +* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO Extension) +* [`ListenNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenNotification) (MinIO Extension) + +### API Reference : File Object Operations +* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) +* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FGetObject) + +### API Reference : Object Operations +* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject) +* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject) +* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming) +* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject) +* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject) +* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject) +* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects) +* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload) +* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent) + + +### API Reference : Presigned Operations +* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject) +* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject) +* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject) +* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy) + +### API Reference : Client custom settings +* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo) +* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn) +* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff) + +## Full Examples + +### Full Examples : Bucket Operations +* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go) +* 
[listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go) +* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go) +* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go) +* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go) +* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go) +* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go) + +### Full Examples : Bucket policy Operations +* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go) +* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go) +* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go) + +### Full Examples : Bucket lifecycle Operations +* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go) +* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go) + +### Full Examples : Bucket encryption Operations +* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go) +* [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go) +* [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go) + +### Full Examples : Bucket replication Operations +* [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go) +* [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go) +* [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go) + +### Full Examples : Bucket notification Operations +* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) +* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) +* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go) +* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension) +* [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO Extension) + +### Full Examples : File Object Operations +* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) +* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) +* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go) +* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go) + +### Full Examples : Object Operations +* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) +* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) +* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go) +* 
[getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go)
+* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
+* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
+* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
+* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
+* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
+
+### Full Examples : Encrypted Object Operations
+* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
+* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
+* [fputencrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
+
+### Full Examples : Presigned Operations
+* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
+* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
+* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
+
+## Explore Further
+* [Complete Documentation](https://docs.min.io)
+* [MinIO Go Client SDK API Reference](https://docs.min.io/docs/golang-client-api-reference)
+
+## Contribute
+[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
+
+## License
+This SDK is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0); see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information.
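+
+### Quick Sketch : Presigned Operations
+
+As a quick illustration of the presigned APIs listed above, here is a minimal sketch that produces a time-limited download URL. It reuses the quickstart's `play.min.io` credentials; the bucket and object names are the quickstart placeholders, and the one-hour expiry is arbitrary.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+	"net/url"
+	"time"
+
+	"github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+func main() {
+	// Connect to the public play server, as in the quickstart above.
+	minioClient, err := minio.New("play.min.io", &minio.Options{
+		Creds:  credentials.NewStaticV4("Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ""),
+		Secure: true,
+	})
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	// Ask the server to honor a GET on this object for one hour.
+	reqParams := make(url.Values)
+	presignedURL, err := minioClient.PresignedGetObject(context.Background(),
+		"mymusic", "golden-oldies.zip", time.Hour, reqParams)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	log.Println("Share this URL:", presignedURL)
+}
+```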
diff --git a/vendor/github.com/minio/minio-go/v7/README_zh_CN.md b/vendor/github.com/minio/minio-go/v7/README_zh_CN.md
new file mode 100644
index 00000000..64e79341
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/README_zh_CN.md
@@ -0,0 +1,260 @@
+# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge)
+
+The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage service.
+
+**Supported cloud storage providers:**
+
+- AWS Signature Version 4
+   - Amazon S3
+   - MinIO
+
+- AWS Signature Version 2
+   - Google Cloud Storage (Compatibility Mode)
+   - Openstack Swift + Swift3 middleware
+   - Ceph Object Gateway
+   - Riak CS
+
+This document shows how to install the MinIO client SDK, connect to MinIO, and walk through a file upload example. For the complete list of APIs and examples, see the [Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference).
+
+This document assumes that you already have a working [Go development environment](https://golang.org/doc/install).
+
+## Download from GitHub
+```sh
+GO111MODULE=on go get github.com/minio/minio-go/v7
+```
+
+## Initialize the MinIO Client
+The MinIO client requires the following four parameters to connect to an Amazon S3 compatible object storage.
+
+| Parameter | Description |
+| :--- | :--- |
+| endpoint | URL to the object storage service. |
+| accessKeyID | Access key is the user ID that uniquely identifies your account. |
+| secretAccessKey | Secret key is the password to your account. |
+| secure | Set to true to use HTTPS. |
+
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+func main() {
+	endpoint := "play.min.io"
+	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+	useSSL := true
+
+	// Initialize the minio client object.
+	minioClient, err := minio.New(endpoint, &minio.Options{
+		Creds:  credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
+		Secure: useSSL,
+	})
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	log.Printf("%#v\n", minioClient) // minioClient is now set up
+}
+```
+
+## Quick Start Example - File Uploader
+This example connects to an object storage service, creates a bucket, and uploads a file to the bucket.
+
+This example uses the MinIO server running at [https://play.min.io](https://play.min.io); you can use this service for development and testing. The access credentials shown in this example are public.
+
+### FileUploader.go
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+func main() {
+	ctx := context.Background()
+	endpoint := "play.min.io"
+	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+	useSSL := true
+
+	// Initialize the minio client object.
+	minioClient, err := minio.New(endpoint, &minio.Options{
+		Creds:  credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
+		Secure: useSSL,
+	})
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	// Make a bucket called mymusic.
+	bucketName := "mymusic"
+	location := "us-east-1"
+
+	err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location})
+	if err != nil {
+		// Check if the bucket already exists.
+		exists, errBucketExists := minioClient.BucketExists(ctx, bucketName)
+		if errBucketExists == nil && exists {
+			log.Printf("We already own %s\n", bucketName)
+		} else {
+			log.Fatalln(err)
+		}
+	} else {
+		log.Printf("Successfully created %s\n", bucketName)
+	}
+
+	// Upload a zip file.
+	objectName := "golden-oldies.zip"
+	filePath := "/tmp/golden-oldies.zip"
+	contentType := "application/zip"
+
+	// Upload the zip file with FPutObject.
+	info, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType})
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	log.Printf("Successfully uploaded %s of size %d\n", objectName, info.Size)
+}
+```
+
+### Run FileUploader
+```sh
+go run file-uploader.go
+2016/08/13 17:03:28 Successfully created mymusic
+2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
+
+mc ls play/mymusic/
+[2016-05-27 16:02:16 PDT]  17MiB golden-oldies.zip
+```
+
+## API Documentation
+The full API documentation is available here.
+* [Complete API Documentation](https://docs.min.io/docs/golang-client-api-reference)
+
+### API Documentation : Bucket Operations
+* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket)
+* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets)
+* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists)
+* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket)
+* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects)
+* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads)
+
+### API Documentation : Bucket Policy Operations
+* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy)
+* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy)
+
+### API Documentation : Bucket Notification Operations
+* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification)
+* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification)
+* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
+* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO Extension)
+* [`ListenNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenNotification) (MinIO Extension)
+
+### API Documentation : File Object Operations
+* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject)
+* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FGetObject)
+
+### API Documentation : Object Operations
+* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject)
+* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject)
+* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming)
+* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject)
+* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject)
+* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject)
+* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects)
+* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
+* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent)
+
+### API Documentation : Presigned Operations
+* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject)
+* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject)
+* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject)
+* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy)
+
+### API Documentation : Client custom settings
+* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo)
+* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn)
+* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff)
+
+## Full Examples
+
+### Full Examples : Bucket Operations
+* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
+* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
+* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
+* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
+* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
+* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
+* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
+
+### Full Examples : Bucket Policy Operations
+* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
+* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
+* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
+
+### Full Examples : Bucket Lifecycle Operations
+* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go)
+* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go)
+
+### Full Examples : Bucket Encryption Operations
+* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go)
+* [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go)
+* [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go)
+
+### Full Examples : Bucket Replication Operations
+* [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go)
+* [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go)
+* [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go)
+
+### Full Examples : Bucket Notification Operations
+* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
+* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
+* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
+* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension)
+* [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO Extension)
+
+### Full Examples : File Object Operations
+* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
+* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
+* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go)
+* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go)
+
+### Full Examples : Object Operations
+* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
+* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
+* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go)
+* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go)
+* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
+* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
+* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
+* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
+* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
+
+### Full Examples : Encrypted Object Operations
+* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
+* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
+* [fputencrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
+
+### Full Examples : Presigned Operations
+* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
+* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
+* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
+
+## Explore Further
+* [Complete Documentation](https://docs.min.io)
+* [MinIO Go Client SDK API Reference](https://docs.min.io/docs/golang-client-api-reference)
+
+## Contribute
+[Contributors Guide](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md)
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go
new file mode 100644
index 00000000..e02ab84a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go
@@ -0,0 +1,134 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"encoding/xml"
+	"net/http"
+	"net/url"
+
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+	"github.com/minio/minio-go/v7/pkg/sse"
+)
+
+// SetBucketEncryption sets the default encryption configuration on an existing bucket.
+func (c Client) SetBucketEncryption(ctx context.Context, bucketName string, config *sse.Configuration) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+
+	if config == nil {
+		return errInvalidArgument("configuration cannot be empty")
+	}
+
+	buf, err := xml.Marshal(config)
+	if err != nil {
+		return err
+	}
+
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("encryption", "")
+
+	// Content-length is mandatory to set a default encryption configuration
+	reqMetadata := requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentBody:      bytes.NewReader(buf),
+		contentLength:    int64(len(buf)),
+		contentMD5Base64: sumMD5Base64(buf),
+	}
+
+	// Execute PUT to upload a new bucket default encryption configuration.
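+	// executeMethod signs the request with the client's credentials and
+	// retries transient failures before returning the final response.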
+	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return httpRespToErrorResponse(resp, bucketName, "")
+	}
+	return nil
+}
+
+// RemoveBucketEncryption removes the default encryption configuration on a bucket with a context to control cancellations and timeouts.
+func (c Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("encryption", "")
+
+	// DELETE default encryption configuration on a bucket.
+	resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
+		return httpRespToErrorResponse(resp, bucketName, "")
+	}
+	return nil
+}
+
+// GetBucketEncryption gets the default encryption configuration
+// on an existing bucket with a context to control cancellations and timeouts.
+func (c Client) GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return nil, err
+	}
+
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("encryption", "")
+
+	// Execute GET on bucket to get the default encryption configuration.
+	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+		bucketName:  bucketName,
+		queryValues: urlValues,
+	})
+
+	defer closeResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, httpRespToErrorResponse(resp, bucketName, "")
+	}
+
+	encryptionConfig := &sse.Configuration{}
+	if err = xmlDecoder(resp.Body, encryptionConfig); err != nil {
+		return nil, err
+	}
+
+	return encryptionConfig, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go
new file mode 100644
index 00000000..e1fac813
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go
@@ -0,0 +1,147 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"encoding/xml"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+
+	"github.com/minio/minio-go/v7/pkg/lifecycle"
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// SetBucketLifecycle sets the lifecycle on an existing bucket.
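+//
+// Illustrative usage (a sketch; the Rule fields shown are assumptions
+// about this version of the lifecycle package):
+//
+//	config := lifecycle.NewConfiguration()
+//	config.Rules = []lifecycle.Rule{
+//		{ID: "expire-temp", Status: "Enabled", Expiration: lifecycle.Expiration{Days: 7}},
+//	}
+//	err := client.SetBucketLifecycle(context.Background(), "mybucket", config)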
+func (c Client) SetBucketLifecycle(ctx context.Context, bucketName string, config *lifecycle.Configuration) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // If lifecycle is empty then delete it. + if config.Empty() { + return c.removeBucketLifecycle(ctx, bucketName) + } + + buf, err := xml.Marshal(config) + if err != nil { + return err + } + + // Save the updated lifecycle. + return c.putBucketLifecycle(ctx, bucketName, buf) +} + +// Saves a new bucket lifecycle. +func (c Client) putBucketLifecycle(ctx context.Context, bucketName string, buf []byte) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + + // Content-length is mandatory for put lifecycle request + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(buf), + contentLength: int64(len(buf)), + contentMD5Base64: sumMD5Base64(buf), + } + + // Execute PUT to upload a new bucket lifecycle. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// Remove lifecycle from a bucket. +func (c Client) removeBucketLifecycle(ctx context.Context, bucketName string) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + + // Execute DELETE on objectName. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + return nil +} + +// GetBucketLifecycle fetch bucket lifecycle configuration +func (c Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + + bucketLifecycle, err := c.getBucketLifecycle(ctx, bucketName) + if err != nil { + return nil, err + } + + config := lifecycle.NewConfiguration() + if err = xml.Unmarshal(bucketLifecycle, config); err != nil { + return nil, err + } + return config, nil +} + +// Request server for current bucket lifecycle. +func (c Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + + // Execute GET on bucket to get lifecycle. 
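+ // The raw XML body is handed back to GetBucketLifecycle above, which
+ // unmarshals it into a lifecycle.Configuration.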
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, "") + } + } + + return ioutil.ReadAll(resp.Body) +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go new file mode 100644 index 00000000..76787eca --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go @@ -0,0 +1,255 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bufio" + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + "time" + + jsoniter "github.com/json-iterator/go" + "github.com/minio/minio-go/v7/pkg/notification" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketNotification saves a new bucket notification with a context to control cancellations and timeouts. +func (c Client) SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("notification", "") + + notifBytes, err := xml.Marshal(&config) + if err != nil { + return err + } + + notifBuffer := bytes.NewReader(notifBytes) + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: notifBuffer, + contentLength: int64(len(notifBytes)), + contentMD5Base64: sumMD5Base64(notifBytes), + contentSHA256Hex: sum256Hex(notifBytes), + } + + // Execute PUT to upload a new bucket notification. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// RemoveAllBucketNotification - Remove bucket notification clears all previously specified config +func (c Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error { + return c.SetBucketNotification(ctx, bucketName, notification.Configuration{}) +} + +// GetBucketNotification returns current bucket notification configuration +func (c Client) GetBucketNotification(ctx context.Context, bucketName string) (bucketNotification notification.Configuration, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return notification.Configuration{}, err + } + return c.getBucketNotification(ctx, bucketName) +} + +// Request server for notification rules. 
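+// The XML response is decoded into a notification.Configuration by
+// processBucketNotificationResponse below.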
+func (c Client) getBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) { + urlValues := make(url.Values) + urlValues.Set("notification", "") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + + defer closeResponse(resp) + if err != nil { + return notification.Configuration{}, err + } + return processBucketNotificationResponse(bucketName, resp) + +} + +// processes the GetNotification http response from the server. +func processBucketNotificationResponse(bucketName string, resp *http.Response) (notification.Configuration, error) { + if resp.StatusCode != http.StatusOK { + errResponse := httpRespToErrorResponse(resp, bucketName, "") + return notification.Configuration{}, errResponse + } + var bucketNotification notification.Configuration + err := xmlDecoder(resp.Body, &bucketNotification) + if err != nil { + return notification.Configuration{}, err + } + return bucketNotification, nil +} + +// ListenNotification listen for all events, this is a MinIO specific API +func (c Client) ListenNotification(ctx context.Context, prefix, suffix string, events []string) <-chan notification.Info { + return c.ListenBucketNotification(ctx, "", prefix, suffix, events) +} + +// ListenBucketNotification listen for bucket events, this is a MinIO specific API +func (c Client) ListenBucketNotification(ctx context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info { + notificationInfoCh := make(chan notification.Info, 1) + const notificationCapacity = 4 * 1024 * 1024 + notificationEventBuffer := make([]byte, notificationCapacity) + // Only success, start a routine to start reading line by line. + go func(notificationInfoCh chan<- notification.Info) { + defer close(notificationInfoCh) + + // Validate the bucket name. + if bucketName != "" { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + select { + case notificationInfoCh <- notification.Info{ + Err: err, + }: + case <-ctx.Done(): + } + return + } + } + + // Check ARN partition to verify if listening bucket is supported + if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) { + select { + case notificationInfoCh <- notification.Info{ + Err: errAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"), + }: + case <-ctx.Done(): + } + return + } + + // Continuously run and listen on bucket notification. + // Create a done channel to control 'ListObjects' go routine. + retryDoneCh := make(chan struct{}, 1) + + // Indicate to our routine to exit cleanly upon return. + defer close(retryDoneCh) + + // Prepare urlValues to pass into the request on every loop + urlValues := make(url.Values) + urlValues.Set("prefix", prefix) + urlValues.Set("suffix", suffix) + urlValues["events"] = events + + // Wait on the jitter retry loop. + for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) { + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + if err != nil { + select { + case notificationInfoCh <- notification.Info{ + Err: err, + }: + case <-ctx.Done(): + } + return + } + + // Validate http response, upon error return quickly. 
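+ // A non-200 status is converted to an ErrorResponse and delivered
+ // on the channel before the listener goroutine exits.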
+ if resp.StatusCode != http.StatusOK { + errResponse := httpRespToErrorResponse(resp, bucketName, "") + select { + case notificationInfoCh <- notification.Info{ + Err: errResponse, + }: + case <-ctx.Done(): + } + return + } + + // Initialize a new bufio scanner, to read line by line. + bio := bufio.NewScanner(resp.Body) + + // Use a higher buffer to support unexpected + // caching done by proxies + bio.Buffer(notificationEventBuffer, notificationCapacity) + var json = jsoniter.ConfigCompatibleWithStandardLibrary + + // Unmarshal each line, returns marshaled values. + for bio.Scan() { + var notificationInfo notification.Info + if err = json.Unmarshal(bio.Bytes(), ¬ificationInfo); err != nil { + // Unexpected error during json unmarshal, send + // the error to caller for actionable as needed. + select { + case notificationInfoCh <- notification.Info{ + Err: err, + }: + case <-ctx.Done(): + return + } + closeResponse(resp) + continue + } + // Send notificationInfo + select { + case notificationInfoCh <- notificationInfo: + case <-ctx.Done(): + closeResponse(resp) + return + } + } + + if err = bio.Err(); err != nil { + select { + case notificationInfoCh <- notification.Info{ + Err: err, + }: + case <-ctx.Done(): + return + } + } + + // Close current connection before looping further. + closeResponse(resp) + + } + }(notificationInfoCh) + + // Returns the notification info channel, for caller to start reading from. + return notificationInfoCh +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go new file mode 100644 index 00000000..7e01275d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go @@ -0,0 +1,142 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketPolicy sets the access permissions on an existing bucket. +func (c Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // If policy is empty then delete the bucket policy. + if policy == "" { + return c.removeBucketPolicy(ctx, bucketName) + } + + // Save the updated policies. + return c.putBucketPolicy(ctx, bucketName, policy) +} + +// Saves a new bucket policy. +func (c Client) putBucketPolicy(ctx context.Context, bucketName, policy string) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: strings.NewReader(policy), + contentLength: int64(len(policy)), + } + + // Execute PUT to upload a new bucket policy. 
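+ // The policy document travels verbatim as the request body; the server
+ // validates the JSON and answers 200 or 204 on success.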
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// Removes all policies on a bucket. +func (c Client) removeBucketPolicy(ctx context.Context, bucketName string) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") + + // Execute DELETE on objectName. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + return nil +} + +// GetBucketPolicy returns the current policy +func (c Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + bucketPolicy, err := c.getBucketPolicy(ctx, bucketName) + if err != nil { + errResponse := ToErrorResponse(err) + if errResponse.Code == "NoSuchBucketPolicy" { + return "", nil + } + return "", err + } + return bucketPolicy, nil +} + +// Request server for current bucket policy. +func (c Client) getBucketPolicy(ctx context.Context, bucketName string) (string, error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + + defer closeResponse(resp) + if err != nil { + return "", err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return "", httpRespToErrorResponse(resp, bucketName, "") + } + } + + bucketPolicyBuf, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + + policy := string(bucketPolicyBuf) + return policy, err +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go new file mode 100644 index 00000000..41054e13 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go @@ -0,0 +1,228 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/minio/minio-go/v7/pkg/replication" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// RemoveBucketReplication removes a replication config on an existing bucket. 
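+// Calling SetBucketReplication with an empty config has the same effect.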
+func (c Client) RemoveBucketReplication(ctx context.Context, bucketName string) error { + return c.removeBucketReplication(ctx, bucketName) +} + +// SetBucketReplication sets a replication config on an existing bucket. +func (c Client) SetBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // If replication is empty then delete it. + if cfg.Empty() { + return c.removeBucketReplication(ctx, bucketName) + } + // Save the updated replication. + return c.putBucketReplication(ctx, bucketName, cfg) +} + +// Saves a new bucket replication. +func (c Client) putBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication", "") + replication, err := xml.Marshal(cfg) + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(replication), + contentLength: int64(len(replication)), + contentMD5Base64: sumMD5Base64(replication), + } + + // Execute PUT to upload a new bucket replication config. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + + return nil +} + +// Remove replication from a bucket. +func (c Client) removeBucketReplication(ctx context.Context, bucketName string) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication", "") + + // Execute DELETE on objectName. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + return nil +} + +// GetBucketReplication fetches bucket replication configuration.If config is not +// found, returns empty config with nil error. +func (c Client) GetBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return cfg, err + } + bucketReplicationCfg, err := c.getBucketReplication(ctx, bucketName) + if err != nil { + errResponse := ToErrorResponse(err) + if errResponse.Code == "ReplicationConfigurationNotFoundError" { + return cfg, nil + } + return cfg, err + } + return bucketReplicationCfg, nil +} + +// Request server for current bucket replication config. +func (c Client) getBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication", "") + + // Execute GET on bucket to get replication config. 
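+ // On success the XML body below is decoded straight into a
+ // replication.Config.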
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return cfg, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return cfg, httpRespToErrorResponse(resp, bucketName, "")
+ }
+
+ if err = xmlDecoder(resp.Body, &cfg); err != nil {
+ return cfg, err
+ }
+
+ return cfg, nil
+}
+
+// GetBucketReplicationMetrics fetches bucket replication status metrics.
+func (c Client) GetBucketReplicationMetrics(ctx context.Context, bucketName string) (s replication.Metrics, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return s, err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("replication-metrics", "")
+
+ // Execute GET on bucket to get replication metrics.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return s, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return s, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ respBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return s, err
+ }
+
+ if err := json.Unmarshal(respBytes, &s); err != nil {
+ return s, err
+ }
+ return s, nil
+}
+
+// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication
+// is enabled in the replication config.
+func (c Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (resetID string, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("replication-reset", "")
+ if olderThan > 0 {
+ urlValues.Set("older-than", olderThan.String())
+ }
+
+ // Execute PUT on bucket to reset replication.
+ resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return "", err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return "", httpRespToErrorResponse(resp, bucketName, "")
+ }
+ respBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return "", err
+ }
+
+ if err := json.Unmarshal(respBytes, &resetID); err != nil {
+ return "", err
+ }
+ return resetID, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go
new file mode 100644
index 00000000..fcb966e6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go
@@ -0,0 +1,135 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "errors" + "io" + "io/ioutil" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/tags" +) + +// GetBucketTagging fetch tagging configuration for a bucket with a +// context to control cancellations and timeouts. +func (c Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + // Execute GET on bucket to get tagging configuration. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, "") + } + + defer io.Copy(ioutil.Discard, resp.Body) + return tags.ParseBucketXML(resp.Body) +} + +// SetBucketTagging sets tagging configuration for a bucket +// with a context to control cancellations and timeouts. +func (c Client) SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + if tags == nil { + return errors.New("nil tags passed") + } + + buf, err := xml.Marshal(tags) + if err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + // Content-length is mandatory to set a default encryption configuration + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(buf), + contentLength: int64(len(buf)), + contentMD5Base64: sumMD5Base64(buf), + } + + // Execute PUT on bucket to put tagging configuration. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} + +// RemoveBucketTagging removes tagging configuration for a +// bucket with a context to control cancellations and timeouts. +func (c Client) RemoveBucketTagging(ctx context.Context, bucketName string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + // Execute DELETE on bucket to remove tagging configuration. 
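+ // The server replies 200 or 204 once the tagging configuration is gone.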
+ resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go new file mode 100644 index 00000000..e3ceeb33 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go @@ -0,0 +1,137 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketVersioning sets a bucket versioning configuration +func (c Client) SetBucketVersioning(ctx context.Context, bucketName string, config BucketVersioningConfiguration) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + buf, err := xml.Marshal(config) + if err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("versioning", "") + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(buf), + contentLength: int64(len(buf)), + contentMD5Base64: sumMD5Base64(buf), + contentSHA256Hex: sum256Hex(buf), + } + + // Execute PUT to set a bucket versioning. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// EnableVersioning - enable object versioning in given bucket. +func (c Client) EnableVersioning(ctx context.Context, bucketName string) error { + return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Enabled"}) +} + +// SuspendVersioning - suspend object versioning in given bucket. +func (c Client) SuspendVersioning(ctx context.Context, bucketName string) error { + return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Suspended"}) +} + +// BucketVersioningConfiguration is the versioning configuration structure +type BucketVersioningConfiguration struct { + XMLName xml.Name `xml:"VersioningConfiguration"` + Status string `xml:"Status"` + MFADelete string `xml:"MfaDelete,omitempty"` +} + +// Various supported states +const ( + Enabled = "Enabled" + // Disabled State = "Disabled" only used by MFA Delete not supported yet. 
+ Suspended = "Suspended" +) + +// Enabled returns true if bucket versioning is enabled +func (b BucketVersioningConfiguration) Enabled() bool { + return b.Status == Enabled +} + +// Suspended returns true if bucket versioning is suspended +func (b BucketVersioningConfiguration) Suspended() bool { + return b.Status == Suspended +} + +// GetBucketVersioning gets the versioning configuration on +// an existing bucket with a context to control cancellations and timeouts. +func (c Client) GetBucketVersioning(ctx context.Context, bucketName string) (BucketVersioningConfiguration, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return BucketVersioningConfiguration{}, err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("versioning", "") + + // Execute GET on bucket to get the versioning configuration. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return BucketVersioningConfiguration{}, err + } + + if resp.StatusCode != http.StatusOK { + return BucketVersioningConfiguration{}, httpRespToErrorResponse(resp, bucketName, "") + } + + versioningConfig := BucketVersioningConfiguration{} + if err = xmlDecoder(resp.Body, &versioningConfig); err != nil { + return versioningConfig, err + } + + return versioningConfig, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go new file mode 100644 index 00000000..dd597e46 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go @@ -0,0 +1,580 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017, 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/google/uuid" + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// CopyDestOptions represents options specified by user for CopyObject/ComposeObject APIs +type CopyDestOptions struct { + Bucket string // points to destination bucket + Object string // points to destination object + + // `Encryption` is the key info for server-side-encryption with customer + // provided key. If it is nil, no encryption is performed. + Encryption encrypt.ServerSide + + // `userMeta` is the user-metadata key-value pairs to be set on the + // destination. The keys are automatically prefixed with `x-amz-meta-` + // if needed. If nil is passed, and if only a single source (of any + // size) is provided in the ComposeObject call, then metadata from the + // source is copied to the destination. 
+ UserMetadata map[string]string
+ // UserMetadata is applied to the destination only if ReplaceMetadata is
+ // true; otherwise UserMetadata is ignored and the source object's
+ // metadata is preserved.
+ // NOTE: if you set ReplaceMetadata to true and UserMetadata is empty,
+ // the destination object will end up with no user metadata at all.
+ ReplaceMetadata bool
+
+ // UserTags is the set of user-defined object tags to apply to the
+ // destination. It is used only if ReplaceTags is true; otherwise this
+ // field is ignored.
+ UserTags map[string]string
+ ReplaceTags bool
+
+ // Specifies whether you want to apply a Legal Hold to the copied object.
+ LegalHold LegalHoldStatus
+
+ // Object Retention related fields
+ Mode RetentionMode
+ RetainUntilDate time.Time
+
+ Size int64 // Needs to be specified if progress bar is specified.
+ // Progress of the entire copy operation will be sent here.
+ Progress io.Reader
+}
+
+// Process custom-metadata to remove the `x-amz-meta-` prefix if
+// present and validate that keys are distinct (after this
+// prefix removal).
+func filterCustomMeta(userMeta map[string]string) map[string]string {
+ m := make(map[string]string)
+ for k, v := range userMeta {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
+ k = k[len("x-amz-meta-"):]
+ }
+ if _, ok := m[k]; ok {
+ continue
+ }
+ m[k] = v
+ }
+ return m
+}
+
+// Marshal converts all the CopyDestOptions into their
+// equivalent HTTP header representation.
+func (opts CopyDestOptions) Marshal(header http.Header) {
+ const replaceDirective = "REPLACE"
+ if opts.ReplaceTags {
+ header.Set(amzTaggingHeaderDirective, replaceDirective)
+ if tags := s3utils.TagEncode(opts.UserTags); tags != "" {
+ header.Set(amzTaggingHeader, tags)
+ }
+ }
+
+ if opts.LegalHold != LegalHoldStatus("") {
+ header.Set(amzLegalHoldHeader, opts.LegalHold.String())
+ }
+
+ if opts.Mode != RetentionMode("") && !opts.RetainUntilDate.IsZero() {
+ header.Set(amzLockMode, opts.Mode.String())
+ header.Set(amzLockRetainUntil, opts.RetainUntilDate.Format(time.RFC3339))
+ }
+
+ if opts.Encryption != nil {
+ opts.Encryption.Marshal(header)
+ }
+
+ if opts.ReplaceMetadata {
+ header.Set("x-amz-metadata-directive", replaceDirective)
+ for k, v := range filterCustomMeta(opts.UserMetadata) {
+ if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
+ header.Set(k, v)
+ } else {
+ header.Set("x-amz-meta-"+k, v)
+ }
+ }
+ }
+}
+
+// validate checks that the CopyDestOptions describe a usable destination.
+func (opts CopyDestOptions) validate() (err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
+ return err
+ }
+ if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
+ return err
+ }
+ if opts.Progress != nil && opts.Size < 0 {
+ return errInvalidArgument("For progress bar effective size needs to be specified")
+ }
+ return nil
+}
+
+// CopySrcOptions represents a source object to be copied, using
+// server-side copying APIs.
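+//
+// A minimal sketch of a server-side copy using these options (assuming a
+// configured *minio.Client named c; bucket and object names are illustrative):
+//
+//  src := minio.CopySrcOptions{Bucket: "src-bucket", Object: "src-object"}
+//  dst := minio.CopyDestOptions{Bucket: "dst-bucket", Object: "dst-object"}
+//  info, err := c.CopyObject(context.Background(), dst, src)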
+type CopySrcOptions struct { + Bucket, Object string + VersionID string + MatchETag string + NoMatchETag string + MatchModifiedSince time.Time + MatchUnmodifiedSince time.Time + MatchRange bool + Start, End int64 + Encryption encrypt.ServerSide +} + +// Marshal converts all the CopySrcOptions into their +// equivalent HTTP header representation +func (opts CopySrcOptions) Marshal(header http.Header) { + // Set the source header + header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)) + if opts.VersionID != "" { + header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)+"?versionId="+opts.VersionID) + } + + if opts.MatchETag != "" { + header.Set("x-amz-copy-source-if-match", opts.MatchETag) + } + if opts.NoMatchETag != "" { + header.Set("x-amz-copy-source-if-none-match", opts.NoMatchETag) + } + + if !opts.MatchModifiedSince.IsZero() { + header.Set("x-amz-copy-source-if-modified-since", opts.MatchModifiedSince.Format(http.TimeFormat)) + } + if !opts.MatchUnmodifiedSince.IsZero() { + header.Set("x-amz-copy-source-if-unmodified-since", opts.MatchUnmodifiedSince.Format(http.TimeFormat)) + } + + if opts.Encryption != nil { + encrypt.SSECopy(opts.Encryption).Marshal(header) + } +} + +func (opts CopySrcOptions) validate() (err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil { + return err + } + if err = s3utils.CheckValidObjectName(opts.Object); err != nil { + return err + } + if opts.Start > opts.End || opts.Start < 0 { + return errInvalidArgument("start must be non-negative, and start must be at most end.") + } + return nil +} + +// Low level implementation of CopyObject API, supports only upto 5GiB worth of copy. +func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, + metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions) (ObjectInfo, error) { + + // Build headers. + headers := make(http.Header) + + // Set all the metadata headers. 
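+ // Keys are used as-is, so callers must pass fully-formed header names
+ // (for example x-amz-meta-*).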
+ for k, v := range metadata { + headers.Set(k, v) + } + if !dstOpts.Internal.ReplicationStatus.Empty() { + headers.Set(amzBucketReplicationStatus, string(dstOpts.Internal.ReplicationStatus)) + } + if !dstOpts.Internal.SourceMTime.IsZero() { + headers.Set(minIOBucketSourceMTime, dstOpts.Internal.SourceMTime.Format(time.RFC3339Nano)) + } + if dstOpts.Internal.SourceETag != "" { + headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag) + } + if dstOpts.Internal.ReplicationRequest { + headers.Set(minIOBucketReplicationRequest, "") + } + if len(dstOpts.UserTags) != 0 { + headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags)) + } + + reqMetadata := requestMetadata{ + bucketName: destBucket, + objectName: destObject, + customHeader: headers, + } + if dstOpts.Internal.SourceVersionID != "" { + if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil { + return ObjectInfo{}, errInvalidArgument(err.Error()) + } + urlValues := make(url.Values) + urlValues.Set("versionId", dstOpts.Internal.SourceVersionID) + reqMetadata.queryValues = urlValues + } + + // Set the source header + headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) + if srcOpts.VersionID != "" { + headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)+"?versionId="+srcOpts.VersionID) + } + // Send upload-part-copy request + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return ObjectInfo{}, err + } + + // Check if we got an error response. + if resp.StatusCode != http.StatusOK { + return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject) + } + + cpObjRes := copyObjectResult{} + err = xmlDecoder(resp.Body, &cpObjRes) + if err != nil { + return ObjectInfo{}, err + } + + objInfo := ObjectInfo{ + Key: destObject, + ETag: strings.Trim(cpObjRes.ETag, "\""), + LastModified: cpObjRes.LastModified, + } + return objInfo, nil +} + +func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, + partID int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) { + + headers := make(http.Header) + + // Set source + headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) + + if startOffset < 0 { + return p, errInvalidArgument("startOffset must be non-negative") + } + + if length >= 0 { + headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1)) + } + + for k, v := range metadata { + headers.Set(k, v) + } + + queryValues := make(url.Values) + queryValues.Set("partNumber", strconv.Itoa(partID)) + queryValues.Set("uploadId", uploadID) + + resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ + bucketName: destBucket, + objectName: destObject, + customHeader: headers, + queryValues: queryValues, + }) + defer closeResponse(resp) + if err != nil { + return + } + + // Check if we got an error response. + if resp.StatusCode != http.StatusOK { + return p, httpRespToErrorResponse(resp, destBucket, destObject) + } + + // Decode copy-part response on success. 
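+ // The response carries the new part's ETag, which the caller needs
+ // later to complete the multipart upload.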
+ cpObjRes := copyObjectResult{} + err = xmlDecoder(resp.Body, &cpObjRes) + if err != nil { + return p, err + } + p.PartNumber, p.ETag = partID, cpObjRes.ETag + return p, nil +} + +// uploadPartCopy - helper function to create a part in a multipart +// upload via an upload-part-copy request +// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html +func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int, + headers http.Header) (p CompletePart, err error) { + + // Build query parameters + urlValues := make(url.Values) + urlValues.Set("partNumber", strconv.Itoa(partNumber)) + urlValues.Set("uploadId", uploadID) + + // Send upload-part-copy request + resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ + bucketName: bucket, + objectName: object, + customHeader: headers, + queryValues: urlValues, + }) + defer closeResponse(resp) + if err != nil { + return p, err + } + + // Check if we got an error response. + if resp.StatusCode != http.StatusOK { + return p, httpRespToErrorResponse(resp, bucket, object) + } + + // Decode copy-part response on success. + cpObjRes := copyObjectResult{} + err = xmlDecoder(resp.Body, &cpObjRes) + if err != nil { + return p, err + } + p.PartNumber, p.ETag = partNumber, cpObjRes.ETag + return p, nil +} + +// ComposeObject - creates an object using server-side copying +// of existing objects. It takes a list of source objects (with optional offsets) +// and concatenates them into a new object using only server-side copying +// operations. Optionally takes progress reader hook for applications to +// look at current progress. +func (c Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) { + if len(srcs) < 1 || len(srcs) > maxPartsCount { + return UploadInfo{}, errInvalidArgument("There must be as least one and up to 10000 source objects.") + } + + for _, src := range srcs { + if err := src.validate(); err != nil { + return UploadInfo{}, err + } + } + + if err := dst.validate(); err != nil { + return UploadInfo{}, err + } + + srcObjectInfos := make([]ObjectInfo, len(srcs)) + srcObjectSizes := make([]int64, len(srcs)) + var totalSize, totalParts int64 + var err error + for i, src := range srcs { + opts := StatObjectOptions{ServerSideEncryption: encrypt.SSE(src.Encryption), VersionID: src.VersionID} + srcObjectInfos[i], err = c.statObject(context.Background(), src.Bucket, src.Object, opts) + if err != nil { + return UploadInfo{}, err + } + + srcCopySize := srcObjectInfos[i].Size + // Check if a segment is specified, and if so, is the + // segment within object bounds? + if src.MatchRange { + // Since range is specified, + // 0 <= src.start <= src.end + // so only invalid case to check is: + if src.End >= srcCopySize || src.Start < 0 { + return UploadInfo{}, errInvalidArgument( + fmt.Sprintf("CopySrcOptions %d has invalid segment-to-copy [%d, %d] (size is %d)", + i, src.Start, src.End, srcCopySize)) + } + srcCopySize = src.End - src.Start + 1 + } + + // Only the last source may be less than `absMinPartSize` + if srcCopySize < absMinPartSize && i < len(srcs)-1 { + return UploadInfo{}, errInvalidArgument( + fmt.Sprintf("CopySrcOptions %d is too small (%d) and it is not the last part", i, srcCopySize)) + } + + // Is data to copy too large? 
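+ // (maxMultipartPutObjectSize caps a composed object at 5 TiB.)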
+ totalSize += srcCopySize + if totalSize > maxMultipartPutObjectSize { + return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize)) + } + + // record source size + srcObjectSizes[i] = srcCopySize + + // calculate parts needed for current source + totalParts += partsRequired(srcCopySize) + // Do we need more parts than we are allowed? + if totalParts > maxPartsCount { + return UploadInfo{}, errInvalidArgument(fmt.Sprintf( + "Your proposed compose object requires more than %d parts", maxPartsCount)) + } + } + + // Single source object case (i.e. when only one source is + // involved, it is being copied wholly and at most 5GiB in + // size, emptyfiles are also supported). + if (totalParts == 1 && srcs[0].Start == -1 && totalSize <= maxPartSize) || (totalSize == 0) { + return c.CopyObject(ctx, dst, srcs[0]) + } + + // Now, handle multipart-copy cases. + + // 1. Ensure that the object has not been changed while + // we are copying data. + for i, src := range srcs { + src.MatchETag = srcObjectInfos[i].ETag + } + + // 2. Initiate a new multipart upload. + + // Set user-metadata on the destination object. If no + // user-metadata is specified, and there is only one source, + // (only) then metadata from source is copied. + var userMeta map[string]string + if dst.ReplaceMetadata { + userMeta = dst.UserMetadata + } else { + userMeta = srcObjectInfos[0].UserMetadata + } + + var userTags map[string]string + if dst.ReplaceTags { + userTags = dst.UserTags + } else { + userTags = srcObjectInfos[0].UserTags + } + + uploadID, err := c.newUploadID(ctx, dst.Bucket, dst.Object, PutObjectOptions{ + ServerSideEncryption: dst.Encryption, + UserMetadata: userMeta, + UserTags: userTags, + Mode: dst.Mode, + RetainUntilDate: dst.RetainUntilDate, + LegalHold: dst.LegalHold, + }) + if err != nil { + return UploadInfo{}, err + } + + // 3. Perform copy part uploads + objParts := []CompletePart{} + partIndex := 1 + for i, src := range srcs { + var h = make(http.Header) + src.Marshal(h) + if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC { + dst.Encryption.Marshal(h) + } + + // calculate start/end indices of parts after + // splitting. + startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src) + for j, start := range startIdx { + end := endIdx[j] + + // Add (or reset) source range header for + // upload part copy request. + h.Set("x-amz-copy-source-range", + fmt.Sprintf("bytes=%d-%d", start, end)) + + // make upload-part-copy request + complPart, err := c.uploadPartCopy(ctx, dst.Bucket, + dst.Object, uploadID, partIndex, h) + if err != nil { + return UploadInfo{}, err + } + if dst.Progress != nil { + io.CopyN(ioutil.Discard, dst.Progress, end-start+1) + } + objParts = append(objParts, complPart) + partIndex++ + } + } + + // 4. Make final complete-multipart request. + uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID, + completeMultipartUpload{Parts: objParts}) + if err != nil { + return UploadInfo{}, err + } + + uploadInfo.Size = totalSize + return uploadInfo, nil +} + +// partsRequired is maximum parts possible with +// max part size of ceiling(maxMultipartPutObjectSize / (maxPartsCount - 1)) +func partsRequired(size int64) int64 { + maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1) + r := size / int64(maxPartSize) + if size%int64(maxPartSize) > 0 { + r++ + } + return r +} + +// calculateEvenSplits - computes splits for a source and returns +// start and end index slices. 
Splits happen evenly to be sure that no +// part is less than 5MiB, as that could fail the multipart request if +// it is not the last part. +func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex []int64) { + if size == 0 { + return + } + + reqParts := partsRequired(size) + startIndex = make([]int64, reqParts) + endIndex = make([]int64, reqParts) + // Compute number of required parts `k`, as: + // + // k = ceiling(size / copyPartSize) + // + // Now, distribute the `size` bytes in the source into + // k parts as evenly as possible: + // + // r parts sized (q+1) bytes, and + // (k - r) parts sized q bytes, where + // + // size = q * k + r (by simple division of size by k, + // so that 0 <= r < k) + // + start := src.Start + if start == -1 { + start = 0 + } + quot, rem := size/reqParts, size%reqParts + nextStart := start + for j := int64(0); j < reqParts; j++ { + curPartSize := quot + if j < rem { + curPartSize++ + } + + cStart := nextStart + cEnd := cStart + curPartSize - 1 + nextStart = cEnd + 1 + + startIndex[j], endIndex[j] = cStart, cEnd + } + return +} diff --git a/vendor/github.com/minio/minio-go/v7/api-copy-object.go b/vendor/github.com/minio/minio-go/v7/api-copy-object.go new file mode 100644 index 00000000..9af036ec --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-copy-object.go @@ -0,0 +1,77 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017, 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" + "io/ioutil" + "net/http" +) + +// CopyObject - copy a source object into a new object +func (c Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySrcOptions) (UploadInfo, error) { + if err := src.validate(); err != nil { + return UploadInfo{}, err + } + + if err := dst.validate(); err != nil { + return UploadInfo{}, err + } + + header := make(http.Header) + dst.Marshal(header) + src.Marshal(header) + + resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ + bucketName: dst.Bucket, + objectName: dst.Object, + customHeader: header, + }) + if err != nil { + return UploadInfo{}, err + } + defer closeResponse(resp) + + if resp.StatusCode != http.StatusOK { + return UploadInfo{}, httpRespToErrorResponse(resp, dst.Bucket, dst.Object) + } + + // Update the progress properly after successful copy. 
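+ // A server-side copy moves no data through the client, so the whole
+ // object size is drained from the progress reader in one step.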
+ if dst.Progress != nil { + io.Copy(ioutil.Discard, io.LimitReader(dst.Progress, dst.Size)) + } + + cpObjRes := copyObjectResult{} + if err = xmlDecoder(resp.Body, &cpObjRes); err != nil { + return UploadInfo{}, err + } + + // extract lifecycle expiry date and rule ID + expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) + + return UploadInfo{ + Bucket: dst.Bucket, + Key: dst.Object, + LastModified: cpObjRes.LastModified, + ETag: trimEtag(resp.Header.Get("ETag")), + VersionID: resp.Header.Get(amzVersionID), + Expiration: expTime, + ExpirationRuleID: ruleID, + }, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go new file mode 100644 index 00000000..970e1fa5 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go @@ -0,0 +1,173 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "encoding/xml" + "io" + "net/http" + "time" +) + +// BucketInfo container for bucket metadata. +type BucketInfo struct { + // The name of the bucket. + Name string `json:"name"` + // Date the bucket was created. + CreationDate time.Time `json:"creationDate"` +} + +// StringMap represents map with custom UnmarshalXML +type StringMap map[string]string + +// UnmarshalXML unmarshals the XML into a map of string to strings, +// creating a key in the map for each tag and setting it's value to the +// tags contents. +// +// The fact this function is on the pointer of Map is important, so that +// if m is nil it can be initialized, which is often the case if m is +// nested in another xml structural. This is also why the first thing done +// on the first line is initialize it. +func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + *m = StringMap{} + type xmlMapEntry struct { + XMLName xml.Name + Value string `xml:",chardata"` + } + for { + var e xmlMapEntry + err := d.Decode(&e) + if err == io.EOF { + break + } else if err != nil { + return err + } + (*m)[e.XMLName.Local] = e.Value + } + return nil +} + +// Owner name. +type Owner struct { + DisplayName string `json:"name"` + ID string `json:"id"` +} + +// UploadInfo contains information about the +// newly uploaded or copied object. +type UploadInfo struct { + Bucket string + Key string + ETag string + Size int64 + LastModified time.Time + Location string + VersionID string + + // Lifecycle expiry-date and ruleID associated with the expiry + // not to be confused with `Expires` HTTP header. + Expiration time.Time + ExpirationRuleID string +} + +// ObjectInfo container for object metadata. +type ObjectInfo struct { + // An ETag is optionally set to md5sum of an object. In case of multipart objects, + // ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of + // each parts concatenated into one string. 
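+ // (e.g. a two-part upload yields an ETag of the form "<hex>-2").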
+ ETag string `json:"etag"` + + Key string `json:"name"` // Name of the object + LastModified time.Time `json:"lastModified"` // Date and time the object was last modified. + Size int64 `json:"size"` // Size in bytes of the object. + ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data. + Expires time.Time `json:"expires"` // The date and time at which the object is no longer able to be cached. + + // Collection of additional metadata on the object. + // eg: x-amz-meta-*, content-encoding etc. + Metadata http.Header `json:"metadata" xml:"-"` + + // x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value. + UserMetadata StringMap `json:"userMetadata"` + + // x-amz-tagging values in their k/v values. + UserTags map[string]string `json:"userTags"` + + // x-amz-tagging-count value + UserTagCount int + + // Owner name. + Owner Owner + + // ACL grant. + Grant []struct { + Grantee struct { + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName"` + URI string `xml:"URI"` + } `xml:"Grantee"` + Permission string `xml:"Permission"` + } `xml:"Grant"` + + // The class of storage used to store the object. + StorageClass string `json:"storageClass"` + + // Versioning related information + IsLatest bool + IsDeleteMarker bool + VersionID string `xml:"VersionId"` + + // x-amz-replication-status value is either in one of the following states + // - COMPLETE + // - PENDING + // - FAILED + // - REPLICA (on the destination) + ReplicationStatus string `xml:"ReplicationStatus"` + + // Lifecycle expiry-date and ruleID associated with the expiry + // not to be confused with `Expires` HTTP header. + Expiration time.Time + ExpirationRuleID string + + // Error + Err error `json:"-"` +} + +// ObjectMultipartInfo container for multipart object metadata. +type ObjectMultipartInfo struct { + // Date and time at which the multipart upload was initiated. + Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Initiator initiator + Owner owner + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass string + + // Key of the object for which the multipart upload was initiated. + Key string + + // Size in bytes of the object. + Size int64 + + // Upload ID that identifies the multipart upload. + UploadID string `xml:"UploadId"` + + // Error + Err error +} diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go new file mode 100644 index 00000000..c45c4fdc --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go @@ -0,0 +1,271 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package minio + +import ( + "encoding/xml" + "fmt" + "net/http" +) + +/* **** SAMPLE ERROR RESPONSE **** + + + AccessDenied + Access Denied + bucketName + objectName + F19772218238A85A + GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD + +*/ + +// ErrorResponse - Is the typed error returned by all API operations. +// ErrorResponse struct should be comparable since it is compared inside +// golang http API (https://github.com/golang/go/issues/29768) +type ErrorResponse struct { + XMLName xml.Name `xml:"Error" json:"-"` + Code string + Message string + BucketName string + Key string + RequestID string `xml:"RequestId"` + HostID string `xml:"HostId"` + + // Region where the bucket is located. This header is returned + // only in HEAD bucket and ListObjects response. + Region string + + // Captures the server string returned in response header. + Server string + + // Underlying HTTP status code for the returned error + StatusCode int `xml:"-" json:"-"` +} + +// ToErrorResponse - Returns parsed ErrorResponse struct from body and +// http headers. +// +// For example: +// +// import s3 "github.com/minio/minio-go/v7" +// ... +// ... +// reader, stat, err := s3.GetObject(...) +// if err != nil { +// resp := s3.ToErrorResponse(err) +// } +// ... +func ToErrorResponse(err error) ErrorResponse { + switch err := err.(type) { + case ErrorResponse: + return err + default: + return ErrorResponse{} + } +} + +// Error - Returns S3 error string. +func (e ErrorResponse) Error() string { + if e.Message == "" { + msg, ok := s3ErrorResponseMap[e.Code] + if !ok { + msg = fmt.Sprintf("Error response code %s.", e.Code) + } + return msg + } + return e.Message +} + +// Common string for errors to report issue location in unexpected +// cases. +const ( + reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues." +) + +// httpRespToErrorResponse returns a new encoded ErrorResponse +// structure as error. +func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error { + if resp == nil { + msg := "Empty http response. " + reportIssue + return errInvalidArgument(msg) + } + + errResp := ErrorResponse{ + StatusCode: resp.StatusCode, + Server: resp.Header.Get("Server"), + } + + err := xmlDecoder(resp.Body, &errResp) + // Xml decoding failed with no body, fall back to HTTP headers. 
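+ // The switch below substitutes a canonical S3 error code (NoSuchBucket,
+ // NoSuchKey, AccessDenied, Conflict, PreconditionFailed) based purely on
+ // the HTTP status code whenever no parseable XML body was returned.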
+ if err != nil { + switch resp.StatusCode { + case http.StatusNotFound: + if objectName == "" { + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: "NoSuchBucket", + Message: "The specified bucket does not exist.", + BucketName: bucketName, + } + } else { + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: "NoSuchKey", + Message: "The specified key does not exist.", + BucketName: bucketName, + Key: objectName, + } + } + case http.StatusForbidden: + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: "AccessDenied", + Message: "Access Denied.", + BucketName: bucketName, + Key: objectName, + } + case http.StatusConflict: + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: "Conflict", + Message: "Bucket not empty.", + BucketName: bucketName, + } + case http.StatusPreconditionFailed: + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: "PreconditionFailed", + Message: s3ErrorResponseMap["PreconditionFailed"], + BucketName: bucketName, + Key: objectName, + } + default: + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: resp.Status, + Message: resp.Status, + BucketName: bucketName, + } + } + } + + // Save hostID, requestID and region information + // from headers if not available through error XML. + if errResp.RequestID == "" { + errResp.RequestID = resp.Header.Get("x-amz-request-id") + } + if errResp.HostID == "" { + errResp.HostID = resp.Header.Get("x-amz-id-2") + } + if errResp.Region == "" { + errResp.Region = resp.Header.Get("x-amz-bucket-region") + } + if errResp.Code == "InvalidRegion" && errResp.Region != "" { + errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region) + } + + return errResp +} + +// errTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration. +func errTransferAccelerationBucket(bucketName string) error { + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidArgument", + Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.", + BucketName: bucketName, + } +} + +// errEntityTooLarge - Input size is larger than supported maximum. +func errEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize) + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "EntityTooLarge", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// errEntityTooSmall - Input size is smaller than supported minimum. +func errEntityTooSmall(totalSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize) + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "EntityTooSmall", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// errUnexpectedEOF - Unexpected end of file reached. +func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize) + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "UnexpectedEOF", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// errInvalidBucketName - Invalid bucket name response. 
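+// The fixed RequestID "minio" in the helpers below marks errors that were
+// generated client-side rather than returned by a server.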
+func errInvalidBucketName(message string) error { + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidBucketName", + Message: message, + RequestID: "minio", + } +} + +// errInvalidObjectName - Invalid object name response. +func errInvalidObjectName(message string) error { + return ErrorResponse{ + StatusCode: http.StatusNotFound, + Code: "NoSuchKey", + Message: message, + RequestID: "minio", + } +} + +// errInvalidArgument - Invalid argument response. +func errInvalidArgument(message string) error { + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidArgument", + Message: message, + RequestID: "minio", + } +} + +// errAPINotSupported - API not supported response +// The specified API call is not supported +func errAPINotSupported(message string) error { + return ErrorResponse{ + StatusCode: http.StatusNotImplemented, + Code: "APINotSupported", + Message: message, + RequestID: "minio", + } +} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go new file mode 100644 index 00000000..afa53079 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go @@ -0,0 +1,140 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "net/http" + "net/url" +) + +type accessControlPolicy struct { + Owner struct { + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName"` + } `xml:"Owner"` + AccessControlList struct { + Grant []struct { + Grantee struct { + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName"` + URI string `xml:"URI"` + } `xml:"Grantee"` + Permission string `xml:"Permission"` + } `xml:"Grant"` + } `xml:"AccessControlList"` +} + +// GetObjectACL get object ACLs +func (c Client) GetObjectACL(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) { + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: url.Values{ + "acl": []string{""}, + }, + }) + if err != nil { + return nil, err + } + defer closeResponse(resp) + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + + res := &accessControlPolicy{} + + if err := xmlDecoder(resp.Body, res); err != nil { + return nil, err + } + + objInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions{}) + if err != nil { + return nil, err + } + + objInfo.Owner.DisplayName = res.Owner.DisplayName + objInfo.Owner.ID = res.Owner.ID + + objInfo.Grant = append(objInfo.Grant, res.AccessControlList.Grant...) 
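+
+ // Try to summarise the grants as a canned ACL first (private, public-read,
+ // and so on); when no canned ACL matches, the individual X-Amz-Grant-*
+ // permissions are reported instead.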
+ + cannedACL := getCannedACL(res) + if cannedACL != "" { + objInfo.Metadata.Add("X-Amz-Acl", cannedACL) + return &objInfo, nil + } + + grantACL := getAmzGrantACL(res) + for k, v := range grantACL { + objInfo.Metadata[k] = v + } + + return &objInfo, nil +} + +func getCannedACL(aCPolicy *accessControlPolicy) string { + grants := aCPolicy.AccessControlList.Grant + + switch { + case len(grants) == 1: + if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" { + return "private" + } + case len(grants) == 2: + for _, g := range grants { + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" { + return "authenticated-read" + } + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" { + return "public-read" + } + if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID { + return "bucket-owner-read" + } + } + case len(grants) == 3: + for _, g := range grants { + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" { + return "public-read-write" + } + } + } + return "" +} + +func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string { + grants := aCPolicy.AccessControlList.Grant + res := map[string][]string{} + + for _, g := range grants { + switch { + case g.Permission == "READ": + res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID) + case g.Permission == "WRITE": + res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID) + case g.Permission == "READ_ACP": + res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID) + case g.Permission == "WRITE_ACP": + res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID) + case g.Permission == "FULL_CONTROL": + res["X-Amz-Grant-Full-Control"] = append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID) + } + } + return res +} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-file.go b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go new file mode 100644 index 00000000..bccff457 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go @@ -0,0 +1,127 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" + "os" + "path/filepath" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// FGetObject - download contents of an object to a local file. +// The options can be used to specify the GET request further. +func (c Client) FGetObject(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + // Verify if destination already exists. 
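+ // (A directory at filePath is rejected outright; an existing partial
+ // download is resumed further below via the ".part.minio" file.)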
+ st, err := os.Stat(filePath) + if err == nil { + // If the destination exists and is a directory. + if st.IsDir() { + return errInvalidArgument("fileName is a directory.") + } + } + + // Proceed if file does not exist. return for all other errors. + if err != nil { + if !os.IsNotExist(err) { + return err + } + } + + // Extract top level directory. + objectDir, _ := filepath.Split(filePath) + if objectDir != "" { + // Create any missing top level directories. + if err := os.MkdirAll(objectDir, 0700); err != nil { + return err + } + } + + // Gather md5sum. + objectStat, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts)) + if err != nil { + return err + } + + // Write to a temporary file "fileName.part.minio" before saving. + filePartPath := filePath + objectStat.ETag + ".part.minio" + + // If exists, open in append mode. If not create it as a part file. + filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) + if err != nil { + return err + } + + // If we return early with an error, be sure to close and delete + // filePart. If we have an error along the way there is a chance + // that filePart is somehow damaged, and we should discard it. + closeAndRemove := true + defer func() { + if closeAndRemove { + _ = filePart.Close() + _ = os.Remove(filePartPath) + } + }() + + // Issue Stat to get the current offset. + st, err = filePart.Stat() + if err != nil { + return err + } + + // Initialize get object request headers to set the + // appropriate range offsets to read from. + if st.Size() > 0 { + opts.SetRange(st.Size(), 0) + } + + // Seek to current position for incoming reader. + objectReader, objectStat, _, err := c.getObject(ctx, bucketName, objectName, opts) + if err != nil { + return err + } + + // Write to the part file. + if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil { + return err + } + + // Close the file before rename, this is specifically needed for Windows users. + closeAndRemove = false + if err = filePart.Close(); err != nil { + return err + } + + // Safely completed. Now commit by renaming to actual filename. + if err = os.Rename(filePartPath, filePath); err != nil { + return err + } + + // Return. + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go new file mode 100644 index 00000000..ef9dd45d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go @@ -0,0 +1,681 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "sync" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// GetObject wrapper function that accepts a request context +func (c Client) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { + // Input validation. 
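+ // (Validation happens eagerly below; the actual HTTP GET is lazy and is
+ // only issued once the caller's first Read, ReadAt, Seek or Stat request
+ // arrives on the request channel.)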
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + + // Detect if snowball is server location we are talking to. + var snowball bool + if location, ok := c.bucketLocCache.Get(bucketName); ok { + if location == "snowball" { + snowball = true + } + } + + var ( + err error + httpReader io.ReadCloser + objectInfo ObjectInfo + totalRead int + ) + + // Create request channel. + reqCh := make(chan getRequest) + // Create response channel. + resCh := make(chan getResponse) + // Create done channel. + doneCh := make(chan struct{}) + + // This routine feeds partial object data as and when the caller reads. + go func() { + defer close(reqCh) + defer close(resCh) + + // Used to verify if etag of object has changed since last read. + var etag string + + // Loop through the incoming control messages and read data. + for { + select { + // When the done channel is closed exit our routine. + case <-doneCh: + // Close the http response body before returning. + // This ends the connection with the server. + if httpReader != nil { + httpReader.Close() + } + return + + // Gather incoming request. + case req := <-reqCh: + // If this is the first request we may not need to do a getObject request yet. + if req.isFirstReq { + // First request is a Read/ReadAt. + if req.isReadOp { + // Differentiate between wanting the whole object and just a range. + if req.isReadAt { + // If this is a ReadAt request only get the specified range. + // Range is set with respect to the offset and length of the buffer requested. + // Do not set objectInfo from the first readAt request because it will not get + // the whole object. + opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) + } else if req.Offset > 0 { + opts.SetRange(req.Offset, 0) + } + httpReader, objectInfo, _, err = c.getObject(ctx, bucketName, objectName, opts) + if err != nil { + resCh <- getResponse{Error: err} + return + } + etag = objectInfo.ETag + // Read at least firstReq.Buffer bytes, if not we have + // reached our EOF. + size, err := readFull(httpReader, req.Buffer) + totalRead += size + if size > 0 && err == io.ErrUnexpectedEOF { + if int64(size) < objectInfo.Size { + // In situations when returned size + // is less than the expected content + // length set by the server, make sure + // we return io.ErrUnexpectedEOF + err = io.ErrUnexpectedEOF + } else { + // If an EOF happens after reading some but not + // all the bytes ReadFull returns ErrUnexpectedEOF + err = io.EOF + } + } else if size == 0 && err == io.EOF && objectInfo.Size > 0 { + // Special cases when server writes more data + // than the content-length, net/http response + // body returns an error, instead of converting + // it to io.EOF - return unexpected EOF. + err = io.ErrUnexpectedEOF + } + // Send back the first response. + resCh <- getResponse{ + objectInfo: objectInfo, + Size: size, + Error: err, + didRead: true, + } + } else { + // First request is a Stat or Seek call. + // Only need to run a StatObject until an actual Read or ReadAt request comes through. + + // Remove range header if already set, for stat Operations to get original file size. + delete(opts.headers, "Range") + objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions(opts)) + if err != nil { + resCh <- getResponse{ + Error: err, + } + // Exit the go-routine. + return + } + etag = objectInfo.ETag + // Send back the first response. 
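+ // (A Stat- or Seek-driven first request carries no payload: only
+ // objectInfo is returned, Size stays zero and didRead stays false.)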
+ resCh <- getResponse{ + objectInfo: objectInfo, + } + } + } else if req.settingObjectInfo { // Request is just to get objectInfo. + // Remove range header if already set, for stat Operations to get original file size. + delete(opts.headers, "Range") + // Check whether this is snowball + // if yes do not use If-Match feature + // it doesn't work. + if etag != "" && !snowball { + opts.SetMatchETag(etag) + } + objectInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions(opts)) + if err != nil { + resCh <- getResponse{ + Error: err, + } + // Exit the goroutine. + return + } + // Send back the objectInfo. + resCh <- getResponse{ + objectInfo: objectInfo, + } + } else { + // Offset changes fetch the new object at an Offset. + // Because the httpReader may not be set by the first + // request if it was a stat or seek it must be checked + // if the object has been read or not to only initialize + // new ones when they haven't been already. + // All readAt requests are new requests. + if req.DidOffsetChange || !req.beenRead { + // Check whether this is snowball + // if yes do not use If-Match feature + // it doesn't work. + if etag != "" && !snowball { + opts.SetMatchETag(etag) + } + if httpReader != nil { + // Close previously opened http reader. + httpReader.Close() + } + // If this request is a readAt only get the specified range. + if req.isReadAt { + // Range is set with respect to the offset and length of the buffer requested. + opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) + } else if req.Offset > 0 { // Range is set with respect to the offset. + opts.SetRange(req.Offset, 0) + } + httpReader, objectInfo, _, err = c.getObject(ctx, bucketName, objectName, opts) + if err != nil { + resCh <- getResponse{ + Error: err, + } + return + } + totalRead = 0 + } + + // Read at least req.Buffer bytes, if not we have + // reached our EOF. + size, err := readFull(httpReader, req.Buffer) + totalRead += size + if size > 0 && err == io.ErrUnexpectedEOF { + if int64(totalRead) < objectInfo.Size { + // In situations when returned size + // is less than the expected content + // length set by the server, make sure + // we return io.ErrUnexpectedEOF + err = io.ErrUnexpectedEOF + } else { + // If an EOF happens after reading some but not + // all the bytes ReadFull returns ErrUnexpectedEOF + err = io.EOF + } + } else if size == 0 && err == io.EOF && objectInfo.Size > 0 { + // Special cases when server writes more data + // than the content-length, net/http response + // body returns an error, instead of converting + // it to io.EOF - return unexpected EOF. + err = io.ErrUnexpectedEOF + } + + // Reply back how much was read. + resCh <- getResponse{ + Size: size, + Error: err, + didRead: true, + objectInfo: objectInfo, + } + } + } + } + }() + + // Create a newObject through the information sent back by reqCh. + return newObject(reqCh, resCh, doneCh), nil +} + +// get request message container to communicate with internal +// go-routine. +type getRequest struct { + Buffer []byte + Offset int64 // readAt offset. + DidOffsetChange bool // Tracks the offset changes for Seek requests. + beenRead bool // Determines if this is the first time an object is being read. + isReadAt bool // Determines if this request is a request to a specific range + isReadOp bool // Determines if this request is a Read or Read/At request. + isFirstReq bool // Determines if this request is the first time an object is being accessed. 
+ settingObjectInfo bool // Determines if this request is to set the objectInfo of an object. +} + +// get response message container to reply back for the request. +type getResponse struct { + Size int + Error error + didRead bool // Lets subsequent calls know whether or not httpReader has been initiated. + objectInfo ObjectInfo // Used for the first request. +} + +// Object represents an open object. It implements +// Reader, ReaderAt, Seeker, Closer for a HTTP stream. +type Object struct { + // Mutex. + mutex *sync.Mutex + + // User allocated and defined. + reqCh chan<- getRequest + resCh <-chan getResponse + doneCh chan<- struct{} + currOffset int64 + objectInfo ObjectInfo + + // Ask lower level to initiate data fetching based on currOffset + seekData bool + + // Keeps track of closed call. + isClosed bool + + // Keeps track of if this is the first call. + isStarted bool + + // Previous error saved for future calls. + prevErr error + + // Keeps track of if this object has been read yet. + beenRead bool + + // Keeps track of if objectInfo has been set yet. + objectInfoSet bool +} + +// doGetRequest - sends and blocks on the firstReqCh and reqCh of an object. +// Returns back the size of the buffer read, if anything was read, as well +// as any error encountered. For all first requests sent on the object +// it is also responsible for sending back the objectInfo. +func (o *Object) doGetRequest(request getRequest) (getResponse, error) { + o.reqCh <- request + response := <-o.resCh + + // Return any error to the top level. + if response.Error != nil { + return response, response.Error + } + + // This was the first request. + if !o.isStarted { + // The object has been operated on. + o.isStarted = true + } + // Set the objectInfo if the request was not readAt + // and it hasn't been set before. + if !o.objectInfoSet && !request.isReadAt { + o.objectInfo = response.objectInfo + o.objectInfoSet = true + } + // Set beenRead only if it has not been set before. + if !o.beenRead { + o.beenRead = response.didRead + } + // Data are ready on the wire, no need to reinitiate connection in lower level + o.seekData = false + + return response, nil +} + +// setOffset - handles the setting of offsets for +// Read/ReadAt/Seek requests. +func (o *Object) setOffset(bytesRead int64) error { + // Update the currentOffset. + o.currOffset += bytesRead + + if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size { + return io.EOF + } + return nil +} + +// Read reads up to len(b) bytes into b. It returns the number of +// bytes read (0 <= n <= len(b)) and any error encountered. Returns +// io.EOF upon end of file. +func (o *Object) Read(b []byte) (n int, err error) { + if o == nil { + return 0, errInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // prevErr is previous error saved from previous operation. + if o.prevErr != nil || o.isClosed { + return 0, o.prevErr + } + + // Create a new request. + readReq := getRequest{ + isReadOp: true, + beenRead: o.beenRead, + Buffer: b, + } + + // Alert that this is the first request. + if !o.isStarted { + readReq.isFirstReq = true + } + + // Ask to establish a new data fetch routine based on seekData flag + readReq.DidOffsetChange = o.seekData + readReq.Offset = o.currOffset + + // Send and receive from the first request. + response, err := o.doGetRequest(readReq) + if err != nil && err != io.EOF { + // Save the error for future calls. + o.prevErr = err + return response.Size, err + } + + // Bytes read. 
+ bytesRead := int64(response.Size)
+
+ // Set the new offset.
+ oerr := o.setOffset(bytesRead)
+ if oerr != nil {
+ // Save the error for future calls.
+ o.prevErr = oerr
+ return response.Size, oerr
+ }
+
+ // Return the response.
+ return response.Size, err
+}
+
+// Stat returns the ObjectInfo structure describing Object.
+func (o *Object) Stat() (ObjectInfo, error) {
+ if o == nil {
+ return ObjectInfo{}, errInvalidArgument("Object is nil")
+ }
+ // Locking.
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+ if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
+ return ObjectInfo{}, o.prevErr
+ }
+
+ // This is the first request.
+ if !o.isStarted || !o.objectInfoSet {
+ // Send the request and get the response.
+ _, err := o.doGetRequest(getRequest{
+ isFirstReq: !o.isStarted,
+ settingObjectInfo: !o.objectInfoSet,
+ })
+ if err != nil {
+ o.prevErr = err
+ return ObjectInfo{}, err
+ }
+ }
+
+ return o.objectInfo, nil
+}
+
+// ReadAt reads len(b) bytes from the Object starting at byte offset
+// off. It returns the number of bytes read and the error, if any.
+// ReadAt always returns a non-nil error when n < len(b). At end of
+// file, that error is io.EOF.
+func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
+ if o == nil {
+ return 0, errInvalidArgument("Object is nil")
+ }
+
+ // Locking.
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+ // prevErr is the error saved from the previous operation.
+ if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
+ return 0, o.prevErr
+ }
+
+ // Set the current offset to ReadAt offset, because the current offset will be shifted at the end of this method.
+ o.currOffset = offset
+
+ // Can only compare offsets to size when size has been set.
+ if o.objectInfoSet {
+ // If offset is negative, then we return io.EOF.
+ // If offset is greater than or equal to object size, we return io.EOF.
+ if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 {
+ return 0, io.EOF
+ }
+ }
+
+ // Create the new readAt request.
+ readAtReq := getRequest{
+ isReadOp: true,
+ isReadAt: true,
+ DidOffsetChange: true, // Offset always changes.
+ beenRead: o.beenRead, // Set if this is the first request to try and read.
+ Offset: offset, // Set the offset.
+ Buffer: b,
+ }
+
+ // Alert that this is the first request.
+ if !o.isStarted {
+ readAtReq.isFirstReq = true
+ }
+
+ // Send and receive from the first request.
+ response, err := o.doGetRequest(readAtReq)
+ if err != nil && err != io.EOF {
+ // Save the error.
+ o.prevErr = err
+ return response.Size, err
+ }
+ // Bytes read.
+ bytesRead := int64(response.Size)
+ // There is no valid objectInfo yet
+ // to compare against for EOF.
+ if !o.objectInfoSet {
+ // Update the currentOffset.
+ o.currOffset += bytesRead
+ } else {
+ // If this was not the first request, update
+ // the offsets and compare against objectInfo
+ // for EOF.
+ oerr := o.setOffset(bytesRead)
+ if oerr != nil {
+ o.prevErr = oerr
+ return response.Size, oerr
+ }
+ }
+ return response.Size, err
+}
+
+// Seek sets the offset for the next Read to offset,
+// interpreted according to whence: 0 means relative to the
+// origin of the file, 1 means relative to the current offset,
+// and 2 means relative to the end.
+// Seek returns the new offset and an error, if any.
+//
+// Seeking to a negative offset is an error. Seeking to any positive
+// offset is legal; subsequent io operations succeed as long as the
+// underlying object is not closed.
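+//
+// Illustrative example: for an object of known size, obj.Seek(-10, 2)
+// (io.SeekEnd) positions the reader ten bytes before the end, while
+// obj.Seek(4, 0) (io.SeekStart) positions it at byte four.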
+func (o *Object) Seek(offset int64, whence int) (n int64, err error) { + if o == nil { + return 0, errInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // At EOF seeking is legal allow only io.EOF, for any other errors we return. + if o.prevErr != nil && o.prevErr != io.EOF { + return 0, o.prevErr + } + + // Negative offset is valid for whence of '2'. + if offset < 0 && whence != 2 { + return 0, errInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence)) + } + + // This is the first request. So before anything else + // get the ObjectInfo. + if !o.isStarted || !o.objectInfoSet { + // Create the new Seek request. + seekReq := getRequest{ + isReadOp: false, + Offset: offset, + isFirstReq: true, + } + // Send and receive from the seek request. + _, err := o.doGetRequest(seekReq) + if err != nil { + // Save the error. + o.prevErr = err + return 0, err + } + } + + // Switch through whence. + switch whence { + default: + return 0, errInvalidArgument(fmt.Sprintf("Invalid whence %d", whence)) + case 0: + if o.objectInfo.Size > -1 && offset > o.objectInfo.Size { + return 0, io.EOF + } + o.currOffset = offset + case 1: + if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size { + return 0, io.EOF + } + o.currOffset += offset + case 2: + // If we don't know the object size return an error for io.SeekEnd + if o.objectInfo.Size < 0 { + return 0, errInvalidArgument("Whence END is not supported when the object size is unknown") + } + // Seeking to positive offset is valid for whence '2', but + // since we are backing a Reader we have reached 'EOF' if + // offset is positive. + if offset > 0 { + return 0, io.EOF + } + // Seeking to negative position not allowed for whence. + if o.objectInfo.Size+offset < 0 { + return 0, errInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence)) + } + o.currOffset = o.objectInfo.Size + offset + } + // Reset the saved error since we successfully seeked, let the Read + // and ReadAt decide. + if o.prevErr == io.EOF { + o.prevErr = nil + } + + // Ask lower level to fetch again from source + o.seekData = true + + // Return the effective offset. + return o.currOffset, nil +} + +// Close - The behavior of Close after the first call returns error +// for subsequent Close() calls. +func (o *Object) Close() (err error) { + if o == nil { + return errInvalidArgument("Object is nil") + } + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // if already closed return an error. + if o.isClosed { + return o.prevErr + } + + // Close successfully. + close(o.doneCh) + + // Save for future operations. + errMsg := "Object is already closed. Bad file descriptor." + o.prevErr = errors.New(errMsg) + // Save here that we closed done channel successfully. + o.isClosed = true + return nil +} + +// newObject instantiates a new *minio.Object* +// ObjectInfo will be set by setObjectInfo +func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- struct{}) *Object { + return &Object{ + mutex: &sync.Mutex{}, + reqCh: reqCh, + resCh: resCh, + doneCh: doneCh, + } +} + +// getObject - retrieve object from Object Storage. +// +// Additionally this function also takes range arguments to download the specified +// range bytes of an object. Setting offset and length = 0 will download the full object. +// +// For more information about the HTTP Range header. +// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. 
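+//
+// The returned io.ReadCloser streams the response body; closing it is the
+// caller's responsibility, as noted at the end of the function.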
+func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { + // Validate input arguments. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, ObjectInfo{}, nil, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, ObjectInfo{}, nil, err + } + + urlValues := make(url.Values) + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + // Execute GET on objectName. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: opts.Header(), + contentSHA256Hex: emptySHA256Hex, + }) + if err != nil { + return nil, ObjectInfo{}, nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { + return nil, ObjectInfo{}, nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + objectStat, err := ToObjectInfo(bucketName, objectName, resp.Header) + if err != nil { + closeResponse(resp) + return nil, ObjectInfo{}, nil, err + } + + // do not close body here, caller will close + return resp.Body, objectStat, resp.Header, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-options.go b/vendor/github.com/minio/minio-go/v7/api-get-options.go new file mode 100644 index 00000000..9e0cb214 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-options.go @@ -0,0 +1,140 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "fmt" + "net/http" + "time" + + "github.com/minio/minio-go/v7/pkg/encrypt" +) + +//AdvancedGetOptions for internal use by MinIO server - not intended for client use. +type AdvancedGetOptions struct { + ReplicationDeleteMarker bool + ReplicationProxyRequest string +} + +// GetObjectOptions are used to specify additional headers or options +// during GET requests. +type GetObjectOptions struct { + headers map[string]string + ServerSideEncryption encrypt.ServerSide + VersionID string + // To be not used by external applications + Internal AdvancedGetOptions +} + +// StatObjectOptions are used to specify additional headers or options +// during GET info/stat requests. +type StatObjectOptions = GetObjectOptions + +// Header returns the http.Header representation of the GET options. +func (o GetObjectOptions) Header() http.Header { + headers := make(http.Header, len(o.headers)) + for k, v := range o.headers { + headers.Set(k, v) + } + if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { + o.ServerSideEncryption.Marshal(headers) + } + // this header is set for active-active replication scenario where GET/HEAD + // to site A is proxy'd to site B if object/version missing on site A. 
+ if o.Internal.ReplicationProxyRequest != "" { + headers.Set(minIOBucketReplicationProxyRequest, o.Internal.ReplicationProxyRequest) + } + return headers +} + +// Set adds a key value pair to the options. The +// key-value pair will be part of the HTTP GET request +// headers. +func (o *GetObjectOptions) Set(key, value string) { + if o.headers == nil { + o.headers = make(map[string]string) + } + o.headers[http.CanonicalHeaderKey(key)] = value +} + +// SetMatchETag - set match etag. +func (o *GetObjectOptions) SetMatchETag(etag string) error { + if etag == "" { + return errInvalidArgument("ETag cannot be empty.") + } + o.Set("If-Match", "\""+etag+"\"") + return nil +} + +// SetMatchETagExcept - set match etag except. +func (o *GetObjectOptions) SetMatchETagExcept(etag string) error { + if etag == "" { + return errInvalidArgument("ETag cannot be empty.") + } + o.Set("If-None-Match", "\""+etag+"\"") + return nil +} + +// SetUnmodified - set unmodified time since. +func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error { + if modTime.IsZero() { + return errInvalidArgument("Modified since cannot be empty.") + } + o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat)) + return nil +} + +// SetModified - set modified time since. +func (o *GetObjectOptions) SetModified(modTime time.Time) error { + if modTime.IsZero() { + return errInvalidArgument("Modified since cannot be empty.") + } + o.Set("If-Modified-Since", modTime.Format(http.TimeFormat)) + return nil +} + +// SetRange - set the start and end offset of the object to be read. +// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference. +func (o *GetObjectOptions) SetRange(start, end int64) error { + switch { + case start == 0 && end < 0: + // Read last '-end' bytes. `bytes=-N`. + o.Set("Range", fmt.Sprintf("bytes=%d", end)) + case 0 < start && end == 0: + // Read everything starting from offset + // 'start'. `bytes=N-`. + o.Set("Range", fmt.Sprintf("bytes=%d-", start)) + case 0 <= start && start <= end: + // Read everything starting at 'start' till the + // 'end'. `bytes=N-M` + o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end)) + default: + // All other cases such as + // bytes=-3- + // bytes=5-3 + // bytes=-2-4 + // bytes=-3-0 + // bytes=-3--2 + // are invalid. + return errInvalidArgument( + fmt.Sprintf( + "Invalid range specified: start=%d end=%d", + start, end)) + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go new file mode 100644 index 00000000..7996c11e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-list.go @@ -0,0 +1,950 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// ListBuckets list all buckets owned by this authenticated user. 
+// +// This call requires explicit authentication, no anonymous requests are +// allowed for listing buckets. +// +// api := client.New(....) +// for message := range api.ListBuckets(context.Background()) { +// fmt.Println(message) +// } +// +func (c Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) { + // Execute GET on service. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex}) + defer closeResponse(resp) + if err != nil { + return nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, "", "") + } + } + listAllMyBucketsResult := listAllMyBucketsResult{} + err = xmlDecoder(resp.Body, &listAllMyBucketsResult) + if err != nil { + return nil, err + } + return listAllMyBucketsResult.Buckets.Bucket, nil +} + +/// Bucket Read Operations. + +func (c Client) listObjectsV2(ctx context.Context, bucketName, objectPrefix string, recursive, metadata bool, maxKeys int) <-chan ObjectInfo { + // Allocate new list objects channel. + objectStatCh := make(chan ObjectInfo, 1) + // Default listing is delimited at "/" + delimiter := "/" + if recursive { + // If recursive we do not delimit. + delimiter = "" + } + + // Return object owner information by default + fetchOwner := true + + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + + // Validate incoming object prefix. + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + + // Initiate list objects goroutine here. + go func(objectStatCh chan<- ObjectInfo) { + defer close(objectStatCh) + // Save continuationToken for next request. + var continuationToken string + for { + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectsV2Query(ctx, bucketName, objectPrefix, continuationToken, + fetchOwner, metadata, delimiter, maxKeys) + if err != nil { + objectStatCh <- ObjectInfo{ + Err: err, + } + return + } + + // If contents are available loop through and send over channel. + for _, object := range result.Contents { + object.ETag = trimEtag(object.ETag) + select { + // Send object content. + case objectStatCh <- object: + // If receives done from the caller, return here. + case <-ctx.Done(): + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + select { + // Send object prefixes. + case objectStatCh <- ObjectInfo{Key: obj.Prefix}: + // If receives done from the caller, return here. + case <-ctx.Done(): + return + } + } + + // If continuation token present, save it for next request. + if result.NextContinuationToken != "" { + continuationToken = result.NextContinuationToken + } + + // Listing ends result is not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(objectStatCh) + return objectStatCh +} + +// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. +// request parameters :- +// --------- +// ?continuation-token - Used to continue iterating over a set of objects +// ?delimiter - A delimiter is a character you use to group keys. 
+// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-keys - Sets the maximum number of keys returned in the response body. +// ?metadata - Specifies if we want metadata for the objects as part of list operation. +func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, maxkeys int) (ListBucketV2Result, error) { + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ListBucketV2Result{}, err + } + // Validate object prefix. + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { + return ListBucketV2Result{}, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + + // Always set list-type in ListObjects V2 + urlValues.Set("list-type", "2") + + if metadata { + urlValues.Set("metadata", "true") + } + + // Always set encoding-type in ListObjects V2 + urlValues.Set("encoding-type", "url") + + // Set object prefix, prefix value to be set to empty is okay. + urlValues.Set("prefix", objectPrefix) + + // Set delimiter, delimiter value to be set to empty is okay. + urlValues.Set("delimiter", delimiter) + + // Set continuation token + if continuationToken != "" { + urlValues.Set("continuation-token", continuationToken) + } + + // Fetch owner when listing + if fetchOwner { + urlValues.Set("fetch-owner", "true") + } + + // Set max keys. + if maxkeys > 0 { + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + } + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return ListBucketV2Result{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Decode listBuckets XML. + listBucketResult := ListBucketV2Result{} + if err = xmlDecoder(resp.Body, &listBucketResult); err != nil { + return listBucketResult, err + } + + // This is an additional verification check to make + // sure proper responses are received. + if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" { + return listBucketResult, ErrorResponse{ + Code: "NotImplemented", + Message: "Truncated response should have continuation token set", + } + } + + for i, obj := range listBucketResult.Contents { + listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType) + if err != nil { + return listBucketResult, err + } + } + + for i, obj := range listBucketResult.CommonPrefixes { + listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType) + if err != nil { + return listBucketResult, err + } + } + + // Success. + return listBucketResult, nil +} + +func (c Client) listObjects(ctx context.Context, bucketName, objectPrefix string, recursive bool, maxKeys int) <-chan ObjectInfo { + // Allocate new list objects channel. + objectStatCh := make(chan ObjectInfo, 1) + // Default listing is delimited at "/" + delimiter := "/" + if recursive { + // If recursive we do not delimit. + delimiter = "" + } + // Validate bucket name. 
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + // Validate incoming object prefix. + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + + // Initiate list objects goroutine here. + go func(objectStatCh chan<- ObjectInfo) { + defer close(objectStatCh) + + marker := "" + for { + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectsQuery(ctx, bucketName, objectPrefix, marker, delimiter, maxKeys) + if err != nil { + objectStatCh <- ObjectInfo{ + Err: err, + } + return + } + + // If contents are available loop through and send over channel. + for _, object := range result.Contents { + // Save the marker. + marker = object.Key + select { + // Send object content. + case objectStatCh <- object: + // If receives done from the caller, return here. + case <-ctx.Done(): + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + select { + // Send object prefixes. + case objectStatCh <- ObjectInfo{Key: obj.Prefix}: + // If receives done from the caller, return here. + case <-ctx.Done(): + return + } + } + + // If next marker present, save it for next request. + if result.NextMarker != "" { + marker = result.NextMarker + } + + // Listing ends result is not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(objectStatCh) + return objectStatCh +} + +func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix string, recursive bool, maxKeys int) <-chan ObjectInfo { + // Allocate new list objects channel. + resultCh := make(chan ObjectInfo, 1) + // Default listing is delimited at "/" + delimiter := "/" + if recursive { + // If recursive we do not delimit. + delimiter = "" + } + + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(resultCh) + resultCh <- ObjectInfo{ + Err: err, + } + return resultCh + } + + // Validate incoming object prefix. + if err := s3utils.CheckValidObjectNamePrefix(prefix); err != nil { + defer close(resultCh) + resultCh <- ObjectInfo{ + Err: err, + } + return resultCh + } + + // Initiate list objects goroutine here. + go func(resultCh chan<- ObjectInfo) { + defer close(resultCh) + + var ( + keyMarker = "" + versionIDMarker = "" + ) + + for { + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectVersionsQuery(ctx, bucketName, prefix, keyMarker, versionIDMarker, delimiter, maxKeys) + if err != nil { + resultCh <- ObjectInfo{ + Err: err, + } + return + } + + // If contents are available loop through and send over channel. + for _, version := range result.Versions { + info := ObjectInfo{ + ETag: trimEtag(version.ETag), + Key: version.Key, + LastModified: version.LastModified, + Size: version.Size, + Owner: version.Owner, + StorageClass: version.StorageClass, + IsLatest: version.IsLatest, + VersionID: version.VersionID, + + IsDeleteMarker: version.isDeleteMarker, + } + select { + // Send object version info. + case resultCh <- info: + // If receives done from the caller, return here. + case <-ctx.Done(): + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. 
+ for _, obj := range result.CommonPrefixes { + select { + // Send object prefixes. + case resultCh <- ObjectInfo{Key: obj.Prefix}: + // If receives done from the caller, return here. + case <-ctx.Done(): + return + } + } + + // If next key marker is present, save it for next request. + if result.NextKeyMarker != "" { + keyMarker = result.NextKeyMarker + } + + // If next version id marker is present, save it for next request. + if result.NextVersionIDMarker != "" { + versionIDMarker = result.NextVersionIDMarker + } + + // Listing ends result is not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(resultCh) + return resultCh +} + +// listObjectVersions - (List Object Versions) - List some or all (up to 1000) of the existing objects +// and their versions in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. +// request parameters :- +// --------- +// ?key-marker - Specifies the key to start with when listing objects in a bucket. +// ?version-id-marker - Specifies the version id marker to start with when listing objects with versions in a bucket. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-keys - Sets the maximum number of keys returned in the response body. +func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, keyMarker, versionIDMarker, delimiter string, maxkeys int) (ListVersionsResult, error) { + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ListVersionsResult{}, err + } + // Validate object prefix. + if err := s3utils.CheckValidObjectNamePrefix(prefix); err != nil { + return ListVersionsResult{}, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + + // Set versions to trigger versioning API + urlValues.Set("versions", "") + + // Set object prefix, prefix value to be set to empty is okay. + urlValues.Set("prefix", prefix) + + // Set delimiter, delimiter value to be set to empty is okay. + urlValues.Set("delimiter", delimiter) + + // Set object marker. + if keyMarker != "" { + urlValues.Set("key-marker", keyMarker) + } + + // Set max keys. + if maxkeys > 0 { + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + } + + // Set version ID marker + if versionIDMarker != "" { + urlValues.Set("version-id-marker", versionIDMarker) + } + + // Always set encoding-type + urlValues.Set("encoding-type", "url") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return ListVersionsResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListVersionsResult{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Decode ListVersionsResult XML. 
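+ // Keys and prefixes arrive URL-encoded because encoding-type=url was
+ // requested above; they are decoded via decodeS3Name before returning.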
+ listObjectVersionsOutput := ListVersionsResult{} + err = xmlDecoder(resp.Body, &listObjectVersionsOutput) + if err != nil { + return ListVersionsResult{}, err + } + + for i, obj := range listObjectVersionsOutput.Versions { + listObjectVersionsOutput.Versions[i].Key, err = decodeS3Name(obj.Key, listObjectVersionsOutput.EncodingType) + if err != nil { + return listObjectVersionsOutput, err + } + } + + for i, obj := range listObjectVersionsOutput.CommonPrefixes { + listObjectVersionsOutput.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listObjectVersionsOutput.EncodingType) + if err != nil { + return listObjectVersionsOutput, err + } + } + + if listObjectVersionsOutput.NextKeyMarker != "" { + listObjectVersionsOutput.NextKeyMarker, err = decodeS3Name(listObjectVersionsOutput.NextKeyMarker, listObjectVersionsOutput.EncodingType) + if err != nil { + return listObjectVersionsOutput, err + } + } + + return listObjectVersionsOutput, nil +} + +// listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. +// request parameters :- +// --------- +// ?marker - Specifies the key to start with when listing objects in a bucket. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-keys - Sets the maximum number of keys returned in the response body. +func (c Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) { + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ListBucketResult{}, err + } + // Validate object prefix. + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { + return ListBucketResult{}, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + + // Set object prefix, prefix value to be set to empty is okay. + urlValues.Set("prefix", objectPrefix) + + // Set delimiter, delimiter value to be set to empty is okay. + urlValues.Set("delimiter", delimiter) + + // Set object marker. + if objectMarker != "" { + urlValues.Set("marker", objectMarker) + } + + // Set max keys. + if maxkeys > 0 { + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + } + + // Always set encoding-type + urlValues.Set("encoding-type", "url") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return ListBucketResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + // Decode listBuckets XML. 
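+ // The same URL-decoding treatment as the V2 listing applies here: keys,
+ // prefixes and the next marker were requested with encoding-type=url.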
+	listBucketResult := ListBucketResult{}
+	err = xmlDecoder(resp.Body, &listBucketResult)
+	if err != nil {
+		return listBucketResult, err
+	}
+
+	for i, obj := range listBucketResult.Contents {
+		listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType)
+		if err != nil {
+			return listBucketResult, err
+		}
+	}
+
+	for i, obj := range listBucketResult.CommonPrefixes {
+		listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType)
+		if err != nil {
+			return listBucketResult, err
+		}
+	}
+
+	if listBucketResult.NextMarker != "" {
+		listBucketResult.NextMarker, err = decodeS3Name(listBucketResult.NextMarker, listBucketResult.EncodingType)
+		if err != nil {
+			return listBucketResult, err
+		}
+	}
+
+	return listBucketResult, nil
+}
+
+// ListObjectsOptions holds all options of a list object request
+type ListObjectsOptions struct {
+	// Include objects versions in the listing
+	WithVersions bool
+	// Include objects metadata in the listing
+	WithMetadata bool
+	// Only list objects with the prefix
+	Prefix string
+	// Ignore '/' delimiter
+	Recursive bool
+	// The maximum number of objects requested per
+	// batch; an advanced use-case, not useful for most
+	// applications
+	MaxKeys int
+
+	// Use the deprecated list objects V1 API
+	UseV1 bool
+}
+
+// ListObjects lists the objects in a bucket after evaluating the passed options.
+//
+//   api := client.New(....)
+//   for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) {
+//       fmt.Println(object)
+//   }
+//
+func (c Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
+	if opts.WithVersions {
+		return c.listObjectVersions(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys)
+	}
+
+	// Use legacy list objects v1 API
+	if opts.UseV1 {
+		return c.listObjects(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys)
+	}
+
+	// Check whether this is a snowball region; if so, ListObjectsV2 doesn't work, fall back to listObjectsV1.
+	if location, ok := c.bucketLocCache.Get(bucketName); ok {
+		if location == "snowball" {
+			return c.listObjects(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys)
+		}
+	}
+
+	return c.listObjectsV2(ctx, bucketName, opts.Prefix, opts.Recursive, opts.WithMetadata, opts.MaxKeys)
+}
+
+// ListIncompleteUploads - List incompletely uploaded multipart objects.
+//
+// ListIncompleteUploads lists all incomplete uploads matching the
+// objectPrefix from the specified bucket. If recursion is enabled
+// it lists all subdirectories and all their contents.
+//
+// The input parameters are bucketName, objectPrefix and recursive.
+// If recursive is 'true' this function returns all the multipart
+// uploads in the given bucket.
+//
+//   api := client.New(....)
+//   // Recursively list all objects in 'mytestbucket'
+//   recursive := true
+//   for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) {
+//       fmt.Println(message)
+//   }
+func (c Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
+	return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive)
+}
+
+// listIncompleteUploads lists all incomplete uploads.
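+//
+// A minimal consumption sketch (illustrative only; the bucket name and
+// prefix below are placeholders, and errors arrive on the same channel):
+//
+//   for upload := range c.listIncompleteUploads(ctx, "mybucket", "prefix/", true) {
+//       if upload.Err != nil {
+//           fmt.Println(upload.Err)
+//           return
+//       }
+//       fmt.Println(upload.Key, upload.UploadID)
+//   }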
+func (c Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
+	// Allocate channel for multipart uploads.
+	objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
+	// Delimiter is set to "/" by default.
+	delimiter := "/"
+	if recursive {
+		// If recursive do not delimit.
+		delimiter = ""
+	}
+	// Validate bucket name.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		defer close(objectMultipartStatCh)
+		objectMultipartStatCh <- ObjectMultipartInfo{
+			Err: err,
+		}
+		return objectMultipartStatCh
+	}
+	// Validate incoming object prefix.
+	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+		defer close(objectMultipartStatCh)
+		objectMultipartStatCh <- ObjectMultipartInfo{
+			Err: err,
+		}
+		return objectMultipartStatCh
+	}
+	go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
+		defer close(objectMultipartStatCh)
+		// object and upload ID marker for future requests.
+		var objectMarker string
+		var uploadIDMarker string
+		for {
+			// list all multipart uploads.
+			result, err := c.listMultipartUploadsQuery(ctx, bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 0)
+			if err != nil {
+				objectMultipartStatCh <- ObjectMultipartInfo{
+					Err: err,
+				}
+				return
+			}
+			objectMarker = result.NextKeyMarker
+			uploadIDMarker = result.NextUploadIDMarker
+
+			// Send all multipart uploads.
+			for _, obj := range result.Uploads {
+				// Forward each upload to the caller.
+				select {
+				// Send individual uploads here.
+				case objectMultipartStatCh <- obj:
+				// If the context is canceled, return.
+				case <-ctx.Done():
+					return
+				}
+			}
+			// Send all common prefixes if any.
+			// NOTE: prefixes are only present if the request is delimited.
+			for _, obj := range result.CommonPrefixes {
+				select {
+				// Send delimited prefixes here.
+				case objectMultipartStatCh <- ObjectMultipartInfo{Key: obj.Prefix, Size: 0}:
+				// If the context is canceled, return.
+				case <-ctx.Done():
+					return
+				}
+			}
+			// Listing ends when the result is not truncated; return right here.
+			if !result.IsTruncated {
+				return
+			}
+		}
+	}(objectMultipartStatCh)
+	// Return the channel.
+	return objectMultipartStatCh
+
+}
+
+// listMultipartUploadsQuery - (List Multipart Uploads).
+// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
+//
+// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
+// request parameters :-
+// ---------
+// ?key-marker - Specifies the multipart upload after which listing should begin.
+// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin.
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
+func (c Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) {
+	// Get resources properly escaped and lined up before using them in http request.
+	urlValues := make(url.Values)
+	// Set uploads.
+	urlValues.Set("uploads", "")
+	// Set object key marker.
+	if keyMarker != "" {
+		urlValues.Set("key-marker", keyMarker)
+	}
+	// Set upload id marker.
+	if uploadIDMarker != "" {
+		urlValues.Set("upload-id-marker", uploadIDMarker)
+	}
+
+	// Set object prefix; an empty prefix value is okay.
+	urlValues.Set("prefix", prefix)
+
+	// Set delimiter; an empty delimiter value is okay.
+	urlValues.Set("delimiter", delimiter)
+
+	// Always set encoding-type
+	urlValues.Set("encoding-type", "url")
+
+	// maxUploads should be 1000 or less.
+	if maxUploads > 0 {
+		// Set max-uploads.
+		urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
+	}
+
+	// Execute GET on bucketName to list multipart uploads.
+	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return ListMultipartUploadsResult{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+	// Decode response body.
+	listMultipartUploadsResult := ListMultipartUploadsResult{}
+	err = xmlDecoder(resp.Body, &listMultipartUploadsResult)
+	if err != nil {
+		return listMultipartUploadsResult, err
+	}
+
+	listMultipartUploadsResult.NextKeyMarker, err = decodeS3Name(listMultipartUploadsResult.NextKeyMarker, listMultipartUploadsResult.EncodingType)
+	if err != nil {
+		return listMultipartUploadsResult, err
+	}
+
+	listMultipartUploadsResult.NextUploadIDMarker, err = decodeS3Name(listMultipartUploadsResult.NextUploadIDMarker, listMultipartUploadsResult.EncodingType)
+	if err != nil {
+		return listMultipartUploadsResult, err
+	}
+
+	for i, obj := range listMultipartUploadsResult.Uploads {
+		listMultipartUploadsResult.Uploads[i].Key, err = decodeS3Name(obj.Key, listMultipartUploadsResult.EncodingType)
+		if err != nil {
+			return listMultipartUploadsResult, err
+		}
+	}
+
+	for i, obj := range listMultipartUploadsResult.CommonPrefixes {
+		listMultipartUploadsResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listMultipartUploadsResult.EncodingType)
+		if err != nil {
+			return listMultipartUploadsResult, err
+		}
+	}
+
+	return listMultipartUploadsResult, nil
+}
+
+// listObjectParts lists all object parts recursively.
+func (c Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
+	// Part number marker for the next batch of request.
+	var nextPartNumberMarker int
+	partsInfo = make(map[int]ObjectPart)
+	for {
+		// Get list of uploaded parts, a maximum of 1000 per request.
+		listObjPartsResult, err := c.listObjectPartsQuery(ctx, bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
+		if err != nil {
+			return nil, err
+		}
+		// Append to parts info.
+		for _, part := range listObjPartsResult.ObjectParts {
+			// Trim off the odd double quotes from ETag in the beginning and end.
+			part.ETag = trimEtag(part.ETag)
+			partsInfo[part.PartNumber] = part
+		}
+		// Keep part number marker for the next iteration.
+		nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker
+		// Listing ends when the result is not truncated; return right here.
+		if !listObjPartsResult.IsTruncated {
+			break
+		}
+	}
+
+	// Return all the parts.
+	return partsInfo, nil
+}
+
+// findUploadIDs lists all incomplete uploads and finds the uploadIDs of the matching object name.
+func (c Client) findUploadIDs(ctx context.Context, bucketName, objectName string) ([]string, error) {
+	var uploadIDs []string
+	// Make list incomplete uploads recursive.
+	isRecursive := true
+	// List all incomplete uploads.
+	for mpUpload := range c.listIncompleteUploads(ctx, bucketName, objectName, isRecursive) {
+		if mpUpload.Err != nil {
+			return nil, mpUpload.Err
+		}
+		if objectName == mpUpload.Key {
+			uploadIDs = append(uploadIDs, mpUpload.UploadID)
+		}
+	}
+	// Return all matching upload ids.
+	return uploadIDs, nil
+}
+
+// listObjectPartsQuery (List Parts query)
+// - lists some or all (up to 1000) parts that have been uploaded
+// for a specific multipart upload
+//
+// You can use the request parameters as selection criteria to return
+// a subset of the uploads in a bucket, request parameters :-
+// ---------
+// ?part-number-marker - Specifies the part after which listing should
+// begin.
+// ?max-parts - Maximum parts to be listed per request.
+func (c Client) listObjectPartsQuery(ctx context.Context, bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) {
+	// Get resources properly escaped and lined up before using them in http request.
+	urlValues := make(url.Values)
+	// Set part number marker.
+	urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
+	// Set upload id.
+	urlValues.Set("uploadId", uploadID)
+
+	// maxParts should be 1000 or less.
+	if maxParts > 0 {
+		// Set max parts.
+		urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
+	}
+
+	// Execute GET on objectName to get list of parts.
+	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return ListObjectPartsResult{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+	// Decode list object parts XML.
+	listObjectPartsResult := ListObjectPartsResult{}
+	err = xmlDecoder(resp.Body, &listObjectPartsResult)
+	if err != nil {
+		return listObjectPartsResult, err
+	}
+	return listObjectPartsResult, nil
+}
+
+// Decode an S3 object name according to the encoding type
+func decodeS3Name(name, encodingType string) (string, error) {
+	switch encodingType {
+	case "url":
+		return url.QueryUnescape(name)
+	default:
+		return name, nil
+	}
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go
new file mode 100644
index 00000000..b139c168
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go
@@ -0,0 +1,176 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// objectLegalHold - object legal hold specified in +// https://docs.aws.amazon.com/AmazonS3/latest/API/archive-RESTObjectPUTLegalHold.html +type objectLegalHold struct { + XMLNS string `xml:"xmlns,attr,omitempty"` + XMLName xml.Name `xml:"LegalHold"` + Status LegalHoldStatus `xml:"Status,omitempty"` +} + +// PutObjectLegalHoldOptions represents options specified by user for PutObjectLegalHold call +type PutObjectLegalHoldOptions struct { + VersionID string + Status *LegalHoldStatus +} + +// GetObjectLegalHoldOptions represents options specified by user for GetObjectLegalHold call +type GetObjectLegalHoldOptions struct { + VersionID string +} + +// LegalHoldStatus - object legal hold status. +type LegalHoldStatus string + +const ( + // LegalHoldEnabled indicates legal hold is enabled + LegalHoldEnabled LegalHoldStatus = "ON" + + // LegalHoldDisabled indicates legal hold is disabled + LegalHoldDisabled LegalHoldStatus = "OFF" +) + +func (r LegalHoldStatus) String() string { + return string(r) +} + +// IsValid - check whether this legal hold status is valid or not. +func (r LegalHoldStatus) IsValid() bool { + return r == LegalHoldEnabled || r == LegalHoldDisabled +} + +func newObjectLegalHold(status *LegalHoldStatus) (*objectLegalHold, error) { + if status == nil { + return nil, fmt.Errorf("Status not set") + } + if !status.IsValid() { + return nil, fmt.Errorf("invalid legal hold status `%v`", status) + } + legalHold := &objectLegalHold{ + Status: *status, + } + return legalHold, nil +} + +// PutObjectLegalHold : sets object legal hold for a given object and versionID. +func (c Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts PutObjectLegalHoldOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("legal-hold", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + lh, err := newObjectLegalHold(opts.Status) + if err != nil { + return err + } + + lhData, err := xml.Marshal(lh) + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: bytes.NewReader(lhData), + contentLength: int64(len(lhData)), + contentMD5Base64: sumMD5Base64(lhData), + contentSHA256Hex: sum256Hex(lhData), + } + + // Execute PUT Object Legal Hold. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, objectName) + } + } + return nil +} + +// GetObjectLegalHold gets legal-hold status of given object. +func (c Client) GetObjectLegalHold(ctx context.Context, bucketName, objectName string, opts GetObjectLegalHoldOptions) (status *LegalHoldStatus, err error) { + // Input validation. 
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return nil, err
+	}
+
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, err
+	}
+	urlValues := make(url.Values)
+	urlValues.Set("legal-hold", "")
+
+	if opts.VersionID != "" {
+		urlValues.Set("versionId", opts.VersionID)
+	}
+
+	// Execute GET on object to fetch its legal hold status.
+	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return nil, httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+	lh := &objectLegalHold{}
+	if err = xml.NewDecoder(resp.Body).Decode(lh); err != nil {
+		return nil, err
+	}
+
+	return &lh.Status, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-lock.go b/vendor/github.com/minio/minio-go/v7/api-object-lock.go
new file mode 100644
index 00000000..29f52b05
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-object-lock.go
@@ -0,0 +1,241 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2019 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"encoding/xml"
+	"fmt"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// RetentionMode - object retention mode.
+type RetentionMode string
+
+const (
+	// Governance - governance mode.
+	Governance RetentionMode = "GOVERNANCE"
+
+	// Compliance - compliance mode.
+	Compliance RetentionMode = "COMPLIANCE"
+)
+
+func (r RetentionMode) String() string {
+	return string(r)
+}
+
+// IsValid - check whether this retention mode is valid or not.
+func (r RetentionMode) IsValid() bool {
+	return r == Governance || r == Compliance
+}
+
+// ValidityUnit - retention validity unit.
+type ValidityUnit string
+
+const (
+	// Days - denotes no. of days.
+	Days ValidityUnit = "DAYS"
+
+	// Years - denotes no. of years.
+	Years ValidityUnit = "YEARS"
+)
+
+func (unit ValidityUnit) String() string {
+	return string(unit)
+}
+
+// isValid - check whether this validity unit is valid or not.
+func (unit ValidityUnit) isValid() bool {
+	return unit == Days || unit == Years
+}
+
+// Retention - bucket level retention configuration.
+type Retention struct {
+	Mode     RetentionMode
+	Validity time.Duration
+}
+
+func (r Retention) String() string {
+	return fmt.Sprintf("{Mode:%v, Validity:%v}", r.Mode, r.Validity)
+}
+
+// IsEmpty - returns whether retention is empty or not.
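+//
+// Illustrative sketch (not part of the upstream docs): the zero value is
+// considered empty, a populated configuration is not.
+//
+//   r := Retention{Mode: Governance, Validity: 24 * time.Hour}
+//   fmt.Println(r.IsEmpty())           // false
+//   fmt.Println(Retention{}.IsEmpty()) // true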
+func (r Retention) IsEmpty() bool {
+	return r.Mode == "" || r.Validity == 0
+}
+
+// objectLockConfig - object lock configuration specified in
+// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html
+type objectLockConfig struct {
+	XMLNS             string   `xml:"xmlns,attr,omitempty"`
+	XMLName           xml.Name `xml:"ObjectLockConfiguration"`
+	ObjectLockEnabled string   `xml:"ObjectLockEnabled"`
+	Rule              *struct {
+		DefaultRetention struct {
+			Mode  RetentionMode `xml:"Mode"`
+			Days  *uint         `xml:"Days"`
+			Years *uint         `xml:"Years"`
+		} `xml:"DefaultRetention"`
+	} `xml:"Rule,omitempty"`
+}
+
+func newObjectLockConfig(mode *RetentionMode, validity *uint, unit *ValidityUnit) (*objectLockConfig, error) {
+	config := &objectLockConfig{
+		ObjectLockEnabled: "Enabled",
+	}
+
+	if mode != nil && validity != nil && unit != nil {
+		if !mode.IsValid() {
+			return nil, fmt.Errorf("invalid retention mode `%v`", mode)
+		}
+
+		if !unit.isValid() {
+			return nil, fmt.Errorf("invalid validity unit `%v`", unit)
+		}
+
+		config.Rule = &struct {
+			DefaultRetention struct {
+				Mode  RetentionMode `xml:"Mode"`
+				Days  *uint         `xml:"Days"`
+				Years *uint         `xml:"Years"`
+			} `xml:"DefaultRetention"`
+		}{}
+
+		config.Rule.DefaultRetention.Mode = *mode
+		if *unit == Days {
+			config.Rule.DefaultRetention.Days = validity
+		} else {
+			config.Rule.DefaultRetention.Years = validity
+		}
+
+		return config, nil
+	}
+
+	if mode == nil && validity == nil && unit == nil {
+		return config, nil
+	}
+
+	return nil, fmt.Errorf("all of retention mode, validity and validity unit must be passed")
+}
+
+// SetBucketObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil.
+func (c Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("object-lock", "")
+
+	config, err := newObjectLockConfig(mode, validity, unit)
+	if err != nil {
+		return err
+	}
+
+	configData, err := xml.Marshal(config)
+	if err != nil {
+		return err
+	}
+
+	reqMetadata := requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentBody:      bytes.NewReader(configData),
+		contentLength:    int64(len(configData)),
+		contentMD5Base64: sumMD5Base64(configData),
+		contentSHA256Hex: sum256Hex(configData),
+	}
+
+	// Execute PUT bucket object lock configuration.
+	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+	return nil
+}
+
+// GetObjectLockConfig gets object lock configuration of given bucket.
+func (c Client) GetObjectLockConfig(ctx context.Context, bucketName string) (objectLock string, mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return "", nil, nil, nil, err
+	}
+
+	urlValues := make(url.Values)
+	urlValues.Set("object-lock", "")
+
+	// Execute GET on bucket to fetch the object lock configuration.
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return "", nil, nil, nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return "", nil, nil, nil, httpRespToErrorResponse(resp, bucketName, "") + } + } + config := &objectLockConfig{} + if err = xml.NewDecoder(resp.Body).Decode(config); err != nil { + return "", nil, nil, nil, err + } + + if config.Rule != nil { + mode = &config.Rule.DefaultRetention.Mode + if config.Rule.DefaultRetention.Days != nil { + validity = config.Rule.DefaultRetention.Days + days := Days + unit = &days + } else { + validity = config.Rule.DefaultRetention.Years + years := Years + unit = &years + } + return config.ObjectLockEnabled, mode, validity, unit, nil + } + return config.ObjectLockEnabled, nil, nil, nil, nil +} + +// GetBucketObjectLockConfig gets object lock configuration of given bucket. +func (c Client) GetBucketObjectLockConfig(ctx context.Context, bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { + _, mode, validity, unit, err = c.GetObjectLockConfig(ctx, bucketName) + return mode, validity, unit, err +} + +// SetObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. +func (c Client) SetObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { + return c.SetBucketObjectLockConfig(ctx, bucketName, mode, validity, unit) +} diff --git a/vendor/github.com/minio/minio-go/v7/api-object-retention.go b/vendor/github.com/minio/minio-go/v7/api-object-retention.go new file mode 100644 index 00000000..54f2762d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-object-retention.go @@ -0,0 +1,165 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"encoding/xml"
+	"fmt"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// objectRetention - object retention specified in
+// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html
+type objectRetention struct {
+	XMLNS           string        `xml:"xmlns,attr,omitempty"`
+	XMLName         xml.Name      `xml:"Retention"`
+	Mode            RetentionMode `xml:"Mode,omitempty"`
+	RetainUntilDate *time.Time    `type:"timestamp" timestampFormat:"iso8601" xml:"RetainUntilDate,omitempty"`
+}
+
+func newObjectRetention(mode *RetentionMode, date *time.Time) (*objectRetention, error) {
+	objectRetention := &objectRetention{}
+
+	if date != nil && !date.IsZero() {
+		objectRetention.RetainUntilDate = date
+	}
+	if mode != nil {
+		if !mode.IsValid() {
+			return nil, fmt.Errorf("invalid retention mode `%v`", mode)
+		}
+		objectRetention.Mode = *mode
+	}
+
+	return objectRetention, nil
+}
+
+// PutObjectRetentionOptions represents options specified by user for PutObjectRetention call
+type PutObjectRetentionOptions struct {
+	GovernanceBypass bool
+	Mode             *RetentionMode
+	RetainUntilDate  *time.Time
+	VersionID        string
+}
+
+// PutObjectRetention sets object retention for a given object and versionID.
+func (c Client) PutObjectRetention(ctx context.Context, bucketName, objectName string, opts PutObjectRetentionOptions) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return err
+	}
+
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("retention", "")
+
+	if opts.VersionID != "" {
+		urlValues.Set("versionId", opts.VersionID)
+	}
+
+	retention, err := newObjectRetention(opts.Mode, opts.RetainUntilDate)
+	if err != nil {
+		return err
+	}
+
+	retentionData, err := xml.Marshal(retention)
+	if err != nil {
+		return err
+	}
+
+	// Build headers.
+	headers := make(http.Header)
+
+	if opts.GovernanceBypass {
+		// Set the bypass governance retention header
+		headers.Set(amzBypassGovernance, "true")
+	}
+
+	reqMetadata := requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		contentBody:      bytes.NewReader(retentionData),
+		contentLength:    int64(len(retentionData)),
+		contentMD5Base64: sumMD5Base64(retentionData),
+		contentSHA256Hex: sum256Hex(retentionData),
+		customHeader:     headers,
+	}
+
+	// Execute PUT Object Retention.
+	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
+			return httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+	return nil
+}
+
+// GetObjectRetention gets retention of given object.
+func (c Client) GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return nil, nil, err
+	}
+
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, nil, err
+	}
+	urlValues := make(url.Values)
+	urlValues.Set("retention", "")
+	if versionID != "" {
+		urlValues.Set("versionId", versionID)
+	}
+	// Execute GET on object to fetch its retention configuration.
+	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return nil, nil, httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+	retention := &objectRetention{}
+	if err = xml.NewDecoder(resp.Body).Decode(retention); err != nil {
+		return nil, nil, err
+	}
+
+	return &retention.Mode, retention.RetainUntilDate, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-tagging.go b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go
new file mode 100644
index 00000000..2709efcd
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go
@@ -0,0 +1,157 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"encoding/xml"
+	"net/http"
+	"net/url"
+
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+	"github.com/minio/minio-go/v7/pkg/tags"
+)
+
+// PutObjectTaggingOptions holds an object version id
+// to update tag(s) of a specific object version
+type PutObjectTaggingOptions struct {
+	VersionID string
+}
+
+// PutObjectTagging replaces or creates object tag(s) and can target
+// a specific object version in a versioned bucket.
+func (c Client) PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags, opts PutObjectTaggingOptions) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("tagging", "")
+
+	if opts.VersionID != "" {
+		urlValues.Set("versionId", opts.VersionID)
+	}
+
+	reqBytes, err := xml.Marshal(otags)
+	if err != nil {
+		return err
+	}
+
+	reqMetadata := requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		contentBody:      bytes.NewReader(reqBytes),
+		contentLength:    int64(len(reqBytes)),
+		contentMD5Base64: sumMD5Base64(reqBytes),
+	}
+
+	// Execute PUT to set object tagging.
+	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+	return nil
+}
+
+// GetObjectTaggingOptions holds the object version ID
+// to fetch the tagging key/value pairs
+type GetObjectTaggingOptions struct {
+	VersionID string
+}
+
+// GetObjectTagging fetches object tag(s) with options to target
+// a specific object version in a versioned bucket.
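+//
+// A hedged usage sketch (bucket and object names are placeholders, not
+// part of the upstream docs):
+//
+//   t, err := c.GetObjectTagging(ctx, "mybucket", "myobject", GetObjectTaggingOptions{})
+//   if err != nil {
+//       return err
+//   }
+//   fmt.Println(t.ToMap())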
+func (c Client) GetObjectTagging(ctx context.Context, bucketName, objectName string, opts GetObjectTaggingOptions) (*tags.Tags, error) {
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("tagging", "")
+
+	if opts.VersionID != "" {
+		urlValues.Set("versionId", opts.VersionID)
+	}
+
+	// Execute GET on object to get object tag(s)
+	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+		bucketName:  bucketName,
+		objectName:  objectName,
+		queryValues: urlValues,
+	})
+
+	defer closeResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return nil, httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+
+	return tags.ParseObjectXML(resp.Body)
+}
+
+// RemoveObjectTaggingOptions holds the version id of the object to remove
+type RemoveObjectTaggingOptions struct {
+	VersionID string
+}
+
+// RemoveObjectTagging removes object tag(s) with options to target a specific object
+// version in a versioned bucket
+func (c Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName string, opts RemoveObjectTaggingOptions) error {
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("tagging", "")
+
+	if opts.VersionID != "" {
+		urlValues.Set("versionId", opts.VersionID)
+	}
+
+	// Execute DELETE on object to remove object tag(s)
+	resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+		bucketName:  bucketName,
+		objectName:  objectName,
+		queryValues: urlValues,
+	})
+
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+
+	if resp != nil {
+		// S3 returns "204 No content" after Object tag deletion.
+		if resp.StatusCode != http.StatusNoContent {
+			return httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+	return err
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-presigned.go b/vendor/github.com/minio/minio-go/v7/api-presigned.go
new file mode 100644
index 00000000..80c363da
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-presigned.go
@@ -0,0 +1,216 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"errors"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+	"github.com/minio/minio-go/v7/pkg/signer"
+)
+
+// presignURL - Returns a presigned URL for an input 'method'.
+// Expires maximum is 7 days - i.e. 604800 seconds - and minimum is 1 second.
+func (c Client) presignURL(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	// Input validation.
+	if method == "" {
+		return nil, errInvalidArgument("method cannot be empty.")
+	}
+	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+		return nil, err
+	}
+	if err = isValidExpiry(expires); err != nil {
+		return nil, err
+	}
+
+	// Convert expires into seconds.
+	expireSeconds := int64(expires / time.Second)
+	reqMetadata := requestMetadata{
+		presignURL:  true,
+		bucketName:  bucketName,
+		objectName:  objectName,
+		expires:     expireSeconds,
+		queryValues: reqParams,
+	}
+
+	// Instantiate a new request.
+	// Since expires is set newRequest will presign the request.
+	var req *http.Request
+	if req, err = c.newRequest(ctx, method, reqMetadata); err != nil {
+		return nil, err
+	}
+	return req.URL, nil
+}
+
+// PresignedGetObject - Returns a presigned URL to access object
+// data without credentials. URL can have a maximum expiry of
+// up to 7 days or a minimum of 1 second. Additionally you can override
+// a set of response headers using the query parameters.
+func (c Client) PresignedGetObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, err
+	}
+	return c.presignURL(ctx, http.MethodGet, bucketName, objectName, expires, reqParams)
+}
+
+// PresignedHeadObject - Returns a presigned URL to access
+// object metadata without credentials. URL can have a maximum expiry
+// of up to 7 days or a minimum of 1 second. Additionally you can override
+// a set of response headers using the query parameters.
+func (c Client) PresignedHeadObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, err
+	}
+	return c.presignURL(ctx, http.MethodHead, bucketName, objectName, expires, reqParams)
+}
+
+// PresignedPutObject - Returns a presigned URL to upload an object
+// without credentials. URL can have a maximum expiry of up to 7 days
+// or a minimum of 1 second.
+func (c Client) PresignedPutObject(ctx context.Context, bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, err
+	}
+	return c.presignURL(ctx, http.MethodPut, bucketName, objectName, expires, nil)
+}
+
+// Presign - returns a presigned URL for any http method of your choice
+// along with custom request params. URL can have a maximum expiry of
+// up to 7 days or a minimum of 1 second.
+func (c Client) Presign(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams)
+}
+
+// PresignedPostPolicy - Returns POST urlString, form data to upload an object.
+func (c Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
+	// Validate input arguments.
+	if p.expiration.IsZero() {
+		return nil, nil, errors.New("Expiration time must be specified")
+	}
+	if _, ok := p.formData["key"]; !ok {
+		return nil, nil, errors.New("object key must be specified")
+	}
+	if _, ok := p.formData["bucket"]; !ok {
+		return nil, nil, errors.New("bucket name must be specified")
+	}
+
+	bucketName := p.formData["bucket"]
+	// Fetch the bucket location.
+ location, err := c.getBucketLocation(ctx, bucketName) + if err != nil { + return nil, nil, err + } + + isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName) + + u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil) + if err != nil { + return nil, nil, err + } + + // Get credentials from the configured credentials provider. + credValues, err := c.credsProvider.Get() + if err != nil { + return nil, nil, err + } + + var ( + signerType = credValues.SignerType + sessionToken = credValues.SessionToken + accessKeyID = credValues.AccessKeyID + secretAccessKey = credValues.SecretAccessKey + ) + + if signerType.IsAnonymous() { + return nil, nil, errInvalidArgument("Presigned operations are not supported for anonymous credentials") + } + + // Keep time. + t := time.Now().UTC() + // For signature version '2' handle here. + if signerType.IsV2() { + policyBase64 := p.base64() + p.formData["policy"] = policyBase64 + // For Google endpoint set this value to be 'GoogleAccessId'. + if s3utils.IsGoogleEndpoint(*c.endpointURL) { + p.formData["GoogleAccessId"] = accessKeyID + } else { + // For all other endpoints set this value to be 'AWSAccessKeyId'. + p.formData["AWSAccessKeyId"] = accessKeyID + } + // Sign the policy. + p.formData["signature"] = signer.PostPresignSignatureV2(policyBase64, secretAccessKey) + return u, p.formData, nil + } + + // Add date policy. + if err = p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-date", + value: t.Format(iso8601DateFormat), + }); err != nil { + return nil, nil, err + } + + // Add algorithm policy. + if err = p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-algorithm", + value: signV4Algorithm, + }); err != nil { + return nil, nil, err + } + + // Add a credential policy. + credential := signer.GetCredential(accessKeyID, location, t, signer.ServiceTypeS3) + if err = p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-credential", + value: credential, + }); err != nil { + return nil, nil, err + } + + if sessionToken != "" { + if err = p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-security-token", + value: sessionToken, + }); err != nil { + return nil, nil, err + } + } + + // Get base64 encoded policy. + policyBase64 := p.base64() + + // Fill in the form data. + p.formData["policy"] = policyBase64 + p.formData["x-amz-algorithm"] = signV4Algorithm + p.formData["x-amz-credential"] = credential + p.formData["x-amz-date"] = t.Format(iso8601DateFormat) + if sessionToken != "" { + p.formData["x-amz-security-token"] = sessionToken + } + p.formData["x-amz-signature"] = signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location) + return u, p.formData, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-bucket.go b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go new file mode 100644 index 00000000..df9fe98a --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go @@ -0,0 +1,123 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"encoding/xml"
+	"net/http"
+
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+/// Bucket operations
+func (c Client) makeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) {
+	// Validate the input arguments.
+	if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil {
+		return err
+	}
+
+	err = c.doMakeBucket(ctx, bucketName, opts.Region, opts.ObjectLocking)
+	if err != nil && (opts.Region == "" || opts.Region == "us-east-1") {
+		if resp, ok := err.(ErrorResponse); ok && resp.Code == "AuthorizationHeaderMalformed" && resp.Region != "" {
+			err = c.doMakeBucket(ctx, bucketName, resp.Region, opts.ObjectLocking)
+		}
+	}
+	return err
+}
+
+func (c Client) doMakeBucket(ctx context.Context, bucketName string, location string, objectLockEnabled bool) (err error) {
+	defer func() {
+		// Save the location into cache on a successful makeBucket response.
+		if err == nil {
+			c.bucketLocCache.Set(bucketName, location)
+		}
+	}()
+
+	// If location is empty, treat it as the default region 'us-east-1'.
+	if location == "" {
+		location = "us-east-1"
+		// For custom region clients, default
+		// to the custom region instead of 'us-east-1'.
+		if c.region != "" {
+			location = c.region
+		}
+	}
+	// PUT bucket request metadata.
+	reqMetadata := requestMetadata{
+		bucketName:     bucketName,
+		bucketLocation: location,
+	}
+
+	if objectLockEnabled {
+		headers := make(http.Header)
+		headers.Add("x-amz-bucket-object-lock-enabled", "true")
+		reqMetadata.customHeader = headers
+	}
+
+	// If location is not 'us-east-1' create bucket location config.
+	if location != "us-east-1" && location != "" {
+		createBucketConfig := createBucketConfiguration{}
+		createBucketConfig.Location = location
+		var createBucketConfigBytes []byte
+		createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
+		if err != nil {
+			return err
+		}
+		reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes)
+		reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes)
+		reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes)
+		reqMetadata.contentLength = int64(len(createBucketConfigBytes))
+	}
+
+	// Execute PUT to create a new bucket.
+	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+
+	// Success.
+	return nil
+}
+
+// MakeBucketOptions holds all options to tweak bucket creation
type MakeBucketOptions struct {
+	// Bucket location
+	Region string
+	// Enable object locking
+	ObjectLocking bool
+}
+
+// MakeBucket creates a new bucket with bucketName with a context to control cancellations and timeouts.
+//
+// Location is an optional argument, by default all buckets are
+// created in US Standard Region.
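+//
+// A short usage sketch (the region value is a placeholder, not upstream docs):
+//
+//   err := c.MakeBucket(ctx, "mybucket", MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})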
+//
+// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html
+// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations
+func (c Client) MakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) {
+	return c.makeBucket(ctx, bucketName, opts)
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go
new file mode 100644
index 00000000..f1653afe
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go
@@ -0,0 +1,148 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"io"
+	"math"
+	"os"
+
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// Verify if reader is *minio.Object
+func isObject(reader io.Reader) (ok bool) {
+	_, ok = reader.(*Object)
+	return
+}
+
+// Verify if reader is a generic ReaderAt
+func isReadAt(reader io.Reader) (ok bool) {
+	var v *os.File
+	v, ok = reader.(*os.File)
+	if ok {
+		// Stdin, Stdout and Stderr all have *os.File type
+		// which happen to also be io.ReaderAt compatible
+		// we need to add special conditions for them to
+		// be ignored by this function.
+		for _, f := range []string{
+			"/dev/stdin",
+			"/dev/stdout",
+			"/dev/stderr",
+		} {
+			if f == v.Name() {
+				ok = false
+				break
+			}
+		}
+	} else {
+		_, ok = reader.(io.ReaderAt)
+	}
+	return
+}
+
+// OptimalPartInfo - calculate the optimal part info for a given
+// object size.
+//
+// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible
+// object storage it will have the following parameters as constants.
+//
+//  maxPartsCount - 10000
+//  minPartSize - 16MiB
+//  maxMultipartPutObjectSize - 5TiB
+//
+func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
+	// If object size is '-1', set it to 5TiB.
+	var unknownSize bool
+	if objectSize == -1 {
+		unknownSize = true
+		objectSize = maxMultipartPutObjectSize
+	}
+
+	// object size is larger than supported maximum.
+	if objectSize > maxMultipartPutObjectSize {
+		err = errEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "")
+		return
+	}
+
+	var partSizeFlt float64
+	if configuredPartSize > 0 {
+		if int64(configuredPartSize) > objectSize {
+			err = errEntityTooLarge(int64(configuredPartSize), objectSize, "", "")
+			return
+		}
+
+		if !unknownSize {
+			if objectSize > (int64(configuredPartSize) * maxPartsCount) {
+				err = errInvalidArgument("Part size * max_parts(10000) is lesser than input objectSize.")
+				return
+			}
+		}
+
+		if configuredPartSize < absMinPartSize {
+			err = errInvalidArgument("Input part size is smaller than allowed minimum of 5MiB.")
+			return
+		}
+
+		if configuredPartSize > maxPartSize {
+			err = errInvalidArgument("Input part size is bigger than allowed maximum of 5GiB.")
+			return
+		}
+
+		partSizeFlt = float64(configuredPartSize)
+		if unknownSize {
+			// If input has unknown size and part size is configured,
+			// keep it to maximum allowed as per 10000 parts.
+			objectSize = int64(configuredPartSize) * maxPartsCount
+		}
+	} else {
+		configuredPartSize = minPartSize
+		// Use floats for part size for all calculations to avoid
+		// overflows during float64 to int64 conversions.
+		partSizeFlt = float64(objectSize / maxPartsCount)
+		partSizeFlt = math.Ceil(partSizeFlt/float64(configuredPartSize)) * float64(configuredPartSize)
+	}
+
+	// Total parts count.
+	totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt))
+	// Part size.
+	partSize = int64(partSizeFlt)
+	// Last part size.
+	lastPartSize = objectSize - int64(totalPartsCount-1)*partSize
+	return totalPartsCount, partSize, lastPartSize, nil
+}
+
+// newUploadID - fetch upload id if already present for an object name
+// or initiate a new request to fetch a new upload id.
+func (c Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return "", err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return "", err
+	}
+
+	// Initiate multipart upload for an object.
+	initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts)
+	if err != nil {
+		return "", err
+	}
+	return initMultipartUploadResult.UploadID, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go
new file mode 100644
index 00000000..6c0f20df
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go
@@ -0,0 +1,64 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"mime"
+	"os"
+	"path/filepath"
+
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// FPutObject - Create an object in a bucket, with contents from file at filePath. Allows request cancellation.
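+//
+// A minimal usage sketch (file path, bucket and object names are
+// placeholders; assumes a previously constructed Client c):
+//
+//   info, err := c.FPutObject(ctx, "mybucket", "backup.tar.gz", "/tmp/backup.tar.gz",
+//       PutObjectOptions{ContentType: "application/gzip"})
+//   if err != nil {
+//       return err
+//   }
+//   fmt.Println("uploaded", info.Size, "bytes")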
+func (c Client) FPutObject(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (info UploadInfo, err error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return UploadInfo{}, err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return UploadInfo{}, err
+	}
+
+	// Open the referenced file.
+	fileReader, err := os.Open(filePath)
+	// On any error, fail quickly here.
+	if err != nil {
+		return UploadInfo{}, err
+	}
+	defer fileReader.Close()
+
+	// Save the file stat.
+	fileStat, err := fileReader.Stat()
+	if err != nil {
+		return UploadInfo{}, err
+	}
+
+	// Save the file size.
+	fileSize := fileStat.Size()
+
+	// Set contentType based on filepath extension if not given, or to the default
+	// value of "application/octet-stream" if the extension has no associated type.
+	if opts.ContentType == "" {
+		if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" {
+			opts.ContentType = "application/octet-stream"
+		}
+	}
+	return c.PutObject(ctx, bucketName, objectName, fileReader, fileSize, opts)
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
new file mode 100644
index 00000000..a70d7054
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
@@ -0,0 +1,393 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/google/uuid"
+	"github.com/minio/minio-go/v7/pkg/encrypt"
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64,
+	opts PutObjectOptions) (info UploadInfo, err error) {
+	info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts)
+	if err != nil {
+		errResp := ToErrorResponse(err)
+		// If multipart functionality is not available, fall back
+		// to a single PutObject operation.
+		if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+			// Verify if size of reader is greater than '5GiB'.
+			if size > maxSinglePutObjectSize {
+				return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+			}
+			// Fall back to uploading as single PutObject operation.
+			return c.putObject(ctx, bucketName, objectName, reader, size, opts)
+		}
+	}
+	return info, err
+}
+
+func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
+	// Input validation.
+	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+		return UploadInfo{}, err
+	}
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return UploadInfo{}, err
+	}
+
+	// Total data read and written to server. Should be equal to
+	// 'size' at the end of the call.
+	var totalUploadedSize int64
+
+	// Complete multipart upload.
+	var complMultipartUpload completeMultipartUpload
+
+	// Calculate the optimal parts info for a given size.
+	totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+
+	// Initiate a new multipart upload.
+	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+
+	defer func() {
+		if err != nil {
+			c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+		}
+	}()
+
+	// Part number always starts with '1'.
+	partNumber := 1
+
+	// Initialize parts uploaded map.
+	partsInfo := make(map[int]ObjectPart)
+
+	// Create a buffer.
+	buf := make([]byte, partSize)
+
+	for partNumber <= totalPartsCount {
+		// Choose hash algorithms to be calculated by hashCopyN,
+		// avoid sha256 with non-v4 signature request or
+		// HTTPS connection.
+		hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5)
+
+		length, rErr := readFull(reader, buf)
+		if rErr == io.EOF && partNumber > 1 {
+			break
+		}
+
+		if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF {
+			return UploadInfo{}, rErr
+		}
+
+		// Calculate hash sums of the part data read into the buffer.
+		for k, v := range hashAlgos {
+			v.Write(buf[:length])
+			hashSums[k] = v.Sum(nil)
+			v.Close()
+		}
+
+		// Update progress reader appropriately to the latest offset
+		// as we read from the source.
+		rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
+
+		// Checksums.
+		var (
+			md5Base64 string
+			sha256Hex string
+		)
+		if hashSums["md5"] != nil {
+			md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"])
+		}
+		if hashSums["sha256"] != nil {
+			sha256Hex = hex.EncodeToString(hashSums["sha256"])
+		}
+
+		// Proceed to upload the part.
+		objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
+			md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption)
+		if uerr != nil {
+			return UploadInfo{}, uerr
+		}
+
+		// Save successfully uploaded part metadata.
+		partsInfo[partNumber] = objPart
+
+		// Save successfully uploaded size.
+		totalUploadedSize += int64(length)
+
+		// Increment part number.
+		partNumber++
+
+		// For unknown size, we break away on read EOF.
+		// We do not have to upload till totalPartsCount.
+		if rErr == io.EOF {
+			break
+		}
+	}
+
+	// Loop over total uploaded parts to save them in
+	// Parts array before completing the multipart request.
+	for i := 1; i < partNumber; i++ {
+		part, ok := partsInfo[i]
+		if !ok {
+			return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+		}
+		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+			ETag:       part.ETag,
+			PartNumber: part.PartNumber,
+		})
+	}
+
+	// Sort all completed parts.
+	sort.Sort(completedParts(complMultipartUpload.Parts))
+
+	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+
+	uploadInfo.Size = totalUploadedSize
+	return uploadInfo, nil
+}
+
+// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
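+//
+// For orientation, the multipart flow built on top of this looks roughly
+// like the outline below (a sketch, not upstream documentation):
+//
+//   res, err := c.initiateMultipartUpload(ctx, bucket, object, opts) // obtain upload ID
+//   part, err := c.uploadPart(ctx, bucket, object, res.UploadID, rd, 1, md5b64, sha256hex, n, sse)
+//   _, err = c.completeMultipartUpload(ctx, bucket, object, res.UploadID, complete)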
+func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return initiateMultipartUploadResult{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return initiateMultipartUploadResult{}, err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploads", "") + + if opts.Internal.SourceVersionID != "" { + if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { + return initiateMultipartUploadResult{}, errInvalidArgument(err.Error()) + } + urlValues.Set("versionId", opts.Internal.SourceVersionID) + } + + // Set ContentType header. + customHeader := opts.Header() + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: customHeader, + } + + // Execute POST on an objectName to initiate multipart upload. + resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) + defer closeResponse(resp) + if err != nil { + return initiateMultipartUploadResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + // Decode xml for new multipart upload. + initiateMultipartUploadResult := initiateMultipartUploadResult{} + err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) + if err != nil { + return initiateMultipartUploadResult, err + } + return initiateMultipartUploadResult, nil +} + +// uploadPart - Uploads a part in a multipart upload. +func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader, + partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide) (ObjectPart, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ObjectPart{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return ObjectPart{}, err + } + if size > maxPartSize { + return ObjectPart{}, errEntityTooLarge(size, maxPartSize, bucketName, objectName) + } + if size <= -1 { + return ObjectPart{}, errEntityTooSmall(size, bucketName, objectName) + } + if partNumber <= 0 { + return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.") + } + if uploadID == "" { + return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.") + } + + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set part number. + urlValues.Set("partNumber", strconv.Itoa(partNumber)) + // Set upload id. + urlValues.Set("uploadId", uploadID) + + // Set encryption headers, if any. + customHeader := make(http.Header) + // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html + // Server-side encryption is supported by the S3 Multipart Upload actions. + // Unless you are using a customer-provided encryption key, you don't need + // to specify the encryption parameters in each UploadPart request. 
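+	// Of the supported schemes only SSE-C (customer-provided keys) must
+	// therefore be re-sent on every UploadPart request, which is what
+	// the check below implements; SSE-S3 and SSE-KMS parameters are only
+	// needed on the initiate request.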
+	if sse != nil && sse.Type() == encrypt.SSEC {
+		sse.Marshal(customHeader)
+	}
+
+	reqMetadata := requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		customHeader:     customHeader,
+		contentBody:      reader,
+		contentLength:    size,
+		contentMD5Base64: md5Base64,
+		contentSHA256Hex: sha256Hex,
+	}
+
+	// Execute PUT on each part.
+	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return ObjectPart{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+	// Once successfully uploaded, return the completed part.
+	objPart := ObjectPart{}
+	objPart.Size = size
+	objPart.PartNumber = partNumber
+	// Trim the surrounding double quotes from the returned ETag.
+	objPart.ETag = trimEtag(resp.Header.Get("ETag"))
+	return objPart, nil
+}
+
+// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
+func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
+	complete completeMultipartUpload) (UploadInfo, error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return UploadInfo{}, err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return UploadInfo{}, err
+	}
+
+	// Initialize url queries.
+	urlValues := make(url.Values)
+	urlValues.Set("uploadId", uploadID)
+	// Marshal the complete multipart body.
+	completeMultipartUploadBytes, err := xml.Marshal(complete)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+
+	// Instantiate the complete multipart buffer.
+	completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
+	reqMetadata := requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		contentBody:      completeMultipartUploadBuffer,
+		contentLength:    int64(len(completeMultipartUploadBytes)),
+		contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
+	}
+
+	// Execute POST to complete the multipart upload for an objectName.
+	resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+
+	// Read resp.Body into a []byte to parse for an Error response inside the body.
+	var b []byte
+	b, err = ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+	// Decode the completed multipart upload response on success.
+	completeMultipartUploadResult := completeMultipartUploadResult{}
+	err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult)
+	if err != nil {
+		// xml parsing failure due to the presence of an ill-formed xml fragment
+		return UploadInfo{}, err
+	} else if completeMultipartUploadResult.Bucket == "" {
+		// xml's Decode method ignores well-formed xml that doesn't apply to the type of value supplied.
+		// In this case, it would leave completeMultipartUploadResult with the corresponding zero-values
+		// of the members.
+
+		// Decode the completed multipart upload response on failure.
+		completeMultipartUploadErr := ErrorResponse{}
+		err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr)
+		if err != nil {
+			// xml parsing failure due to the presence of an ill-formed xml fragment
+			return UploadInfo{}, err
+		}
+		return UploadInfo{}, completeMultipartUploadErr
+	}
+
+	// Extract the lifecycle expiry date and rule ID.
+	expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))
+
+	return UploadInfo{
+		Bucket:           completeMultipartUploadResult.Bucket,
+		Key:              completeMultipartUploadResult.Key,
+		ETag:             trimEtag(completeMultipartUploadResult.ETag),
+		VersionID:        resp.Header.Get(amzVersionID),
+		Location:         completeMultipartUploadResult.Location,
+		Expiration:       expTime,
+		ExpirationRuleID: ruleID,
+	}, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
new file mode 100644
index 00000000..39e381e9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
@@ -0,0 +1,487 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"sort"
+	"strings"
+
+	"github.com/google/uuid"
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// putObjectMultipartStream - upload a large object using
+// multipart upload and a streaming signature for signing the payload.
+// Comprehensive put object operation involving multipart uploads.
+//
+// The following code handles these types of readers:
+//
+// - *minio.Object
+// - Any reader which has a method 'ReadAt()'
+//
+func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
+	reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
+
+	if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
+		// If the reader implements ReadAt and is not a *minio.Object, use the parallel uploader.
+		info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
+	} else {
+		info, err = c.putObjectMultipartStreamOptionalChecksum(ctx, bucketName, objectName, reader, size, opts)
+	}
+	if err != nil {
+		errResp := ToErrorResponse(err)
+		// If multipart functionality is not available, fall back to a
+		// single PutObject operation.
+		if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+			// Verify if the size of the reader is greater than '5GiB'.
+			if size > maxSinglePutObjectSize {
+				return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+			}
+			// Fall back to uploading as a single PutObject operation.
+			return c.putObject(ctx, bucketName, objectName, reader, size, opts)
+		}
+	}
+	return info, err
+}
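+// Illustrative usage (names are placeholders): the concurrency and part
+// sizing of the streaming uploader below can be tuned from the public API
+// through PutObjectOptions:
+//
+//	opts := PutObjectOptions{
+//		PartSize:   64 * 1024 * 1024, // 64MiB parts
+//		NumThreads: 8,                // up to 8 parts uploaded in parallel
+//	}
+//	info, err := clnt.PutObject(context.Background(), "mybucket", "big.iso",
+//		reader, size, opts)
+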
+// uploadedPartRes - the response received from a part upload.
+type uploadedPartRes struct {
+	Error   error // Any error encountered while uploading the part.
+	PartNum int   // Number of the part uploaded.
+	Size    int64 // Size of the part uploaded.
+	Part    ObjectPart
+}
+
+type uploadPartReq struct {
+	PartNum int        // Number of the part to upload.
+	Part    ObjectPart // Metadata of the part, filled in once uploaded.
+}
+
+// putObjectMultipartStreamFromReadAt - Uploads files bigger than 128MiB.
+// Supports all readers which implement the io.ReaderAt interface
+// (ReadAt method).
+//
+// NOTE: This function is meant to be used for all readers which
+// implement io.ReaderAt, which allows resuming multipart uploads by
+// reading at an offset and thereby avoids re-reading data which was
+// already uploaded. Internally this function uses temporary files for
+// staging all the data; these temporary files are cleaned up
+// automatically when the caller (i.e. the http client) closes the
+// stream after uploading all the contents successfully.
+func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
+	reader io.ReaderAt, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
+	// Input validation.
+	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+		return UploadInfo{}, err
+	}
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return UploadInfo{}, err
+	}
+
+	// Calculate the optimal parts info for a given size.
+	totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+
+	// Initiate a new multipart upload.
+	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+
+	// Abort the multipart upload in progress if the function returns
+	// any error; since we do not resume, we purge the parts which have
+	// been uploaded to relinquish storage space.
+	defer func() {
+		if err != nil {
+			c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+		}
+	}()
+
+	// Total data read and written to the server; should equal 'size' at the end of the call.
+	var totalUploadedSize int64
+
+	// Complete multipart upload.
+	var complMultipartUpload completeMultipartUpload
+
+	// Declare a channel that sends the next part number to be uploaded.
+	// Buffered to 10000 because that's the maximum number of parts allowed
+	// by S3.
+	uploadPartsCh := make(chan uploadPartReq, 10000)
+
+	// Declare a channel that sends back the response of a part upload.
+	// Buffered to 10000 because that's the maximum number of parts allowed
+	// by S3.
+	uploadedPartsCh := make(chan uploadedPartRes, 10000)
+
+	// Used for readability, lastPartNumber is always totalPartsCount.
+	lastPartNumber := totalPartsCount
+
+	// Send each part number to the channel to be processed.
+	for p := 1; p <= totalPartsCount; p++ {
+		uploadPartsCh <- uploadPartReq{PartNum: p}
+	}
+	close(uploadPartsCh)
+
+	var partsBuf = make([][]byte, opts.getNumThreads())
+	for i := range partsBuf {
+		partsBuf[i] = make([]byte, 0, partSize)
+	}
+
+	// Receive each part number from the channel, allowing up to
+	// opts.getNumThreads() parallel uploads.
+	for w := 1; w <= opts.getNumThreads(); w++ {
+		go func(w int, partSize int64) {
+			// Each worker will draw from the part channel and upload in parallel.
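+			// This is a bounded worker-pool pattern: uploadPartsCh is
+			// the work queue and uploadedPartsCh collects the results,
+			// so at most getNumThreads() parts are in flight at once.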
+			for uploadReq := range uploadPartsCh {
+
+				// Calculate this part's read offset; for every part
+				// number the offset is a multiple of partSize.
+				readOffset := int64(uploadReq.PartNum-1) * partSize
+
+				// As a special case if partNumber is lastPartNumber, we
+				// calculate the offset based on the last part size.
+				if uploadReq.PartNum == lastPartNumber {
+					readOffset = (size - lastPartSize)
+					partSize = lastPartSize
+				}
+
+				n, rerr := readFull(io.NewSectionReader(reader, readOffset, partSize), partsBuf[w-1][:partSize])
+				if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
+					uploadedPartsCh <- uploadedPartRes{
+						Error: rerr,
+					}
+					// Exit the goroutine.
+					return
+				}
+
+				// Wrap the staged part bytes with the progress hook reader.
+				hookReader := newHook(bytes.NewReader(partsBuf[w-1][:n]), opts.Progress)
+
+				// Proceed to upload the part.
+				objPart, err := c.uploadPart(ctx, bucketName, objectName,
+					uploadID, hookReader, uploadReq.PartNum,
+					"", "", partSize, opts.ServerSideEncryption)
+				if err != nil {
+					uploadedPartsCh <- uploadedPartRes{
+						Error: err,
+					}
+					// Exit the goroutine.
+					return
+				}
+
+				// Save successfully uploaded part metadata.
+				uploadReq.Part = objPart
+
+				// Send successful part info through the channel.
+				uploadedPartsCh <- uploadedPartRes{
+					Size:    objPart.Size,
+					PartNum: uploadReq.PartNum,
+					Part:    uploadReq.Part,
+				}
+			}
+		}(w, partSize)
+	}
+
+	// Gather the responses as they occur and update any
+	// progress bar.
+	for u := 1; u <= totalPartsCount; u++ {
+		uploadRes := <-uploadedPartsCh
+		if uploadRes.Error != nil {
+			return UploadInfo{}, uploadRes.Error
+		}
+		// Update the totalUploadedSize.
+		totalUploadedSize += uploadRes.Size
+		// Store the parts to be completed in order.
+		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+			ETag:       uploadRes.Part.ETag,
+			PartNumber: uploadRes.Part.PartNumber,
+		})
+	}
+
+	// Verify if we uploaded all the data.
+	if totalUploadedSize != size {
+		return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+	}
+
+	// Sort all completed parts.
+	sort.Sort(completedParts(complMultipartUpload.Parts))
+
+	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+
+	uploadInfo.Size = totalUploadedSize
+	return uploadInfo, nil
+}
+
+func (c Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string,
+	reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
+	// Input validation.
+	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+		return UploadInfo{}, err
+	}
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return UploadInfo{}, err
+	}
+
+	// Calculate the optimal parts info for a given size.
+	totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+	// Initiates a new multipart request.
+	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+
+	// Abort the multipart upload if the function returns
+	// any error; since we do not resume, we purge
+	// the parts which have been uploaded to relinquish
+	// storage space.
+	defer func() {
+		if err != nil {
+			c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+		}
+	}()
+
+	// Total data read and written to the server;
+	// should equal 'size' at the end of the call.
+	var totalUploadedSize int64
+
+	// Initialize parts uploaded map.
+	partsInfo := make(map[int]ObjectPart)
+
+	// Create a buffer.
+	buf := make([]byte, partSize)
+
+	// Avoid declaring variables in the for loop.
+	var md5Base64 string
+	var hookReader io.Reader
+
+	// Part number always starts with '1'.
+	var partNumber int
+	for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
+
+		// The last part is allowed to be smaller than partSize.
+		if partNumber == totalPartsCount {
+			partSize = lastPartSize
+		}
+
+		if opts.SendContentMd5 {
+			length, rerr := readFull(reader, buf)
+			if rerr == io.EOF && partNumber > 1 {
+				break
+			}
+
+			if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
+				return UploadInfo{}, rerr
+			}
+
+			// Calculate md5sum.
+			hash := c.md5Hasher()
+			hash.Write(buf[:length])
+			md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil))
+			hash.Close()
+
+			// Update progress reader appropriately to the latest offset
+			// as we read from the source.
+			hookReader = newHook(bytes.NewReader(buf[:length]), opts.Progress)
+		} else {
+			// Update progress reader appropriately to the latest offset
+			// as we read from the source.
+			hookReader = newHook(reader, opts.Progress)
+		}
+
+		// Proceed to upload the part.
+		objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID,
+			io.LimitReader(hookReader, partSize),
+			partNumber, md5Base64, "", partSize, opts.ServerSideEncryption)
+		if uerr != nil {
+			return UploadInfo{}, uerr
+		}
+
+		// Save successfully uploaded part metadata.
+		partsInfo[partNumber] = objPart
+
+		// Save successfully uploaded size.
+		totalUploadedSize += partSize
+	}
+
+	// Verify if we uploaded all the data.
+	if size > 0 {
+		if totalUploadedSize != size {
+			return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+		}
+	}
+
+	// Complete multipart upload.
+	var complMultipartUpload completeMultipartUpload
+
+	// Loop over total uploaded parts to save them in
+	// Parts array before completing the multipart request.
+	for i := 1; i < partNumber; i++ {
+		part, ok := partsInfo[i]
+		if !ok {
+			return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+		}
+		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+			ETag:       part.ETag,
+			PartNumber: part.PartNumber,
+		})
+	}
+
+	// Sort all completed parts.
+	sort.Sort(completedParts(complMultipartUpload.Parts))
+
+	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+
+	uploadInfo.Size = totalUploadedSize
+	return uploadInfo, nil
+}
+
+// putObject - a special function used for Google Cloud Storage, since
+// Google's multipart API is not S3 compatible.
+func (c Client) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return UploadInfo{}, err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return UploadInfo{}, err
+	}
+
+	// Size -1 is only supported on Google Cloud Storage; we error
+	// out in all other situations.
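+	// (Google Cloud Storage accepts uploads of unknown length over a
+	// single chunked-transfer PUT, which is why -1 is tolerated here
+	// but nowhere else.)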
+	if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) {
+		return UploadInfo{}, errEntityTooSmall(size, bucketName, objectName)
+	}
+
+	if opts.SendContentMd5 && s3utils.IsGoogleEndpoint(*c.endpointURL) && size < 0 {
+		return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'")
+	}
+
+	if size > 0 {
+		if isReadAt(reader) && !isObject(reader) {
+			seeker, ok := reader.(io.Seeker)
+			if ok {
+				offset, err := seeker.Seek(0, io.SeekCurrent)
+				if err != nil {
+					return UploadInfo{}, errInvalidArgument(err.Error())
+				}
+				reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size)
+			}
+		}
+	}
+
+	var md5Base64 string
+	if opts.SendContentMd5 {
+		// Create a buffer.
+		buf := make([]byte, size)
+
+		length, rErr := readFull(reader, buf)
+		if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF {
+			return UploadInfo{}, rErr
+		}
+
+		// Calculate md5sum.
+		hash := c.md5Hasher()
+		hash.Write(buf[:length])
+		md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil))
+		reader = bytes.NewReader(buf[:length])
+		hash.Close()
+	}
+
+	// Update progress reader appropriately to the latest offset as we
+	// read from the source.
+	readSeeker := newHook(reader, opts.Progress)
+
+	// This function does not calculate sha256 and md5sum for the payload.
+	// Execute put object.
+	return c.putObjectDo(ctx, bucketName, objectName, readSeeker, md5Base64, "", size, opts)
+}
+
+// putObjectDo - executes the put object http operation.
+// NOTE: You must have WRITE permissions on a bucket to add an object to it.
+func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (UploadInfo, error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return UploadInfo{}, err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return UploadInfo{}, err
+	}
+	// Set headers.
+	customHeader := opts.Header()
+
+	// Populate request metadata.
+	reqMetadata := requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		customHeader:     customHeader,
+		contentBody:      reader,
+		contentLength:    size,
+		contentMD5Base64: md5Base64,
+		contentSHA256Hex: sha256Hex,
+	}
+	if opts.Internal.SourceVersionID != "" {
+		if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
+			return UploadInfo{}, errInvalidArgument(err.Error())
+		}
+		urlValues := make(url.Values)
+		urlValues.Set("versionId", opts.Internal.SourceVersionID)
+		reqMetadata.queryValues = urlValues
+	}
+
+	// Execute PUT on objectName.
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return UploadInfo{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + // extract lifecycle expiry date and rule ID + expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) + + return UploadInfo{ + Bucket: bucketName, + Key: objectName, + ETag: trimEtag(resp.Header.Get("ETag")), + VersionID: resp.Header.Get(amzVersionID), + Size: size, + Expiration: expTime, + ExpirationRuleID: ruleID, + }, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go new file mode 100644 index 00000000..247e40a6 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go @@ -0,0 +1,370 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "net/http" + "sort" + "time" + + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/s3utils" + "golang.org/x/net/http/httpguts" +) + +// ReplicationStatus represents replication status of object +type ReplicationStatus string + +const ( + // ReplicationStatusPending indicates replication is pending + ReplicationStatusPending ReplicationStatus = "PENDING" + // ReplicationStatusComplete indicates replication completed ok + ReplicationStatusComplete ReplicationStatus = "COMPLETE" + // ReplicationStatusFailed indicates replication failed + ReplicationStatusFailed ReplicationStatus = "FAILED" + // ReplicationStatusReplica indicates object is a replica of a source + ReplicationStatusReplica ReplicationStatus = "REPLICA" +) + +// Empty returns true if no replication status set. 
+func (r ReplicationStatus) Empty() bool {
+	return r == ""
+}
+
+// AdvancedPutOptions for internal use - to be utilized by replication, ILM transition
+// implementation on MinIO server
+type AdvancedPutOptions struct {
+	SourceVersionID    string
+	SourceETag         string
+	ReplicationStatus  ReplicationStatus
+	SourceMTime        time.Time
+	ReplicationRequest bool
+}
+
+// PutObjectOptions represents options specified by the user for a PutObject call
+type PutObjectOptions struct {
+	UserMetadata            map[string]string
+	UserTags                map[string]string
+	Progress                io.Reader
+	ContentType             string
+	ContentEncoding         string
+	ContentDisposition      string
+	ContentLanguage         string
+	CacheControl            string
+	Mode                    RetentionMode
+	RetainUntilDate         time.Time
+	ServerSideEncryption    encrypt.ServerSide
+	NumThreads              uint
+	StorageClass            string
+	WebsiteRedirectLocation string
+	PartSize                uint64
+	LegalHold               LegalHoldStatus
+	SendContentMd5          bool
+	DisableMultipart        bool
+	Internal                AdvancedPutOptions
+}
+
+// getNumThreads - gets the number of threads to be used in the multipart
+// put object operation
+func (opts PutObjectOptions) getNumThreads() (numThreads int) {
+	if opts.NumThreads > 0 {
+		numThreads = int(opts.NumThreads)
+	} else {
+		numThreads = totalWorkers
+	}
+	return
+}
+
+// Header - constructs the headers from the metadata entered by the user in
+// the PutObjectOptions struct
+func (opts PutObjectOptions) Header() (header http.Header) {
+	header = make(http.Header)
+
+	contentType := opts.ContentType
+	if contentType == "" {
+		contentType = "application/octet-stream"
+	}
+	header.Set("Content-Type", contentType)
+
+	if opts.ContentEncoding != "" {
+		header.Set("Content-Encoding", opts.ContentEncoding)
+	}
+	if opts.ContentDisposition != "" {
+		header.Set("Content-Disposition", opts.ContentDisposition)
+	}
+	if opts.ContentLanguage != "" {
+		header.Set("Content-Language", opts.ContentLanguage)
+	}
+	if opts.CacheControl != "" {
+		header.Set("Cache-Control", opts.CacheControl)
+	}
+
+	if opts.Mode != "" {
+		header.Set(amzLockMode, opts.Mode.String())
+	}
+
+	if !opts.RetainUntilDate.IsZero() {
+		header.Set("X-Amz-Object-Lock-Retain-Until-Date", opts.RetainUntilDate.Format(time.RFC3339))
+	}
+
+	if opts.LegalHold != "" {
+		header.Set(amzLegalHoldHeader, opts.LegalHold.String())
+	}
+
+	if opts.ServerSideEncryption != nil {
+		opts.ServerSideEncryption.Marshal(header)
+	}
+
+	if opts.StorageClass != "" {
+		header.Set(amzStorageClass, opts.StorageClass)
+	}
+
+	if opts.WebsiteRedirectLocation != "" {
+		header.Set(amzWebsiteRedirectLocation, opts.WebsiteRedirectLocation)
+	}
+
+	if !opts.Internal.ReplicationStatus.Empty() {
+		header.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus))
+	}
+	if !opts.Internal.SourceMTime.IsZero() {
+		header.Set(minIOBucketSourceMTime, opts.Internal.SourceMTime.Format(time.RFC3339Nano))
+	}
+	if opts.Internal.SourceETag != "" {
+		header.Set(minIOBucketSourceETag, opts.Internal.SourceETag)
+	}
+	if opts.Internal.ReplicationRequest {
+		header.Set(minIOBucketReplicationRequest, "")
+	}
+	if len(opts.UserTags) != 0 {
+		header.Set(amzTaggingHeader, s3utils.TagEncode(opts.UserTags))
+	}
+
+	for k, v := range opts.UserMetadata {
+		if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
+			header.Set(k, v)
+		} else {
+			header.Set("x-amz-meta-"+k, v)
+		}
+	}
+	return
+}
+
+// validate() checks if the UserMetadata map has standard headers and raises an error if so.
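+// For instance (illustrative values only), the following options would fail
+// validation because "Content-Type" is a standard header:
+//
+//	opts := PutObjectOptions{UserMetadata: map[string]string{"Content-Type": "text/plain"}}
+//	err := opts.validate() // returns an errInvalidArgument error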
+func (opts PutObjectOptions) validate() (err error) { + for k, v := range opts.UserMetadata { + if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) { + return errInvalidArgument(k + " unsupported user defined metadata name") + } + if !httpguts.ValidHeaderFieldValue(v) { + return errInvalidArgument(v + " unsupported user defined metadata value") + } + } + if opts.Mode != "" && !opts.Mode.IsValid() { + return errInvalidArgument(opts.Mode.String() + " unsupported retention mode") + } + if opts.LegalHold != "" && !opts.LegalHold.IsValid() { + return errInvalidArgument(opts.LegalHold.String() + " unsupported legal-hold status") + } + return nil +} + +// completedParts is a collection of parts sortable by their part numbers. +// used for sorting the uploaded parts before completing the multipart request. +type completedParts []CompletePart + +func (a completedParts) Len() int { return len(a) } +func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } + +// PutObject creates an object in a bucket. +// +// You must have WRITE permissions on a bucket to create an object. +// +// - For size smaller than 128MiB PutObject automatically does a +// single atomic Put operation. +// - For size larger than 128MiB PutObject automatically does a +// multipart Put operation. +// - For size input as -1 PutObject does a multipart Put operation +// until input stream reaches EOF. Maximum object size that can +// be uploaded through this operation will be 5TiB. +func (c Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, + opts PutObjectOptions) (info UploadInfo, err error) { + if objectSize < 0 && opts.DisableMultipart { + return UploadInfo{}, errors.New("object size must be provided with disable multipart upload") + } + + err = opts.validate() + if err != nil { + return UploadInfo{}, err + } + + return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts) +} + +func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { + // Check for largest object size allowed. + if size > int64(maxMultipartPutObjectSize) { + return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) + } + + // NOTE: Streaming signature is not supported by GCS. + if s3utils.IsGoogleEndpoint(*c.endpointURL) { + return c.putObject(ctx, bucketName, objectName, reader, size, opts) + } + + partSize := opts.PartSize + if opts.PartSize == 0 { + partSize = minPartSize + } + + if c.overrideSignerType.IsV2() { + if size >= 0 && size < int64(partSize) || opts.DisableMultipart { + return c.putObject(ctx, bucketName, objectName, reader, size, opts) + } + return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts) + } + + if size < 0 { + return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) + } + + if size < int64(partSize) || opts.DisableMultipart { + return c.putObject(ctx, bucketName, objectName, reader, size, opts) + } + + return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts) +} + +func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { + // Input validation. 
+	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+		return UploadInfo{}, err
+	}
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return UploadInfo{}, err
+	}
+
+	// Total data read and written to the server; should equal
+	// 'size' at the end of the call.
+	var totalUploadedSize int64
+
+	// Complete multipart upload.
+	var complMultipartUpload completeMultipartUpload
+
+	// Calculate the optimal parts info for a given size.
+	totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+	// Initiate a new multipart upload.
+	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+
+	defer func() {
+		if err != nil {
+			c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+		}
+	}()
+
+	// Part number always starts with '1'.
+	partNumber := 1
+
+	// Initialize parts uploaded map.
+	partsInfo := make(map[int]ObjectPart)
+
+	// Create a buffer.
+	buf := make([]byte, partSize)
+
+	for partNumber <= totalPartsCount {
+		length, rerr := readFull(reader, buf)
+		if rerr == io.EOF && partNumber > 1 {
+			break
+		}
+
+		if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
+			return UploadInfo{}, rerr
+		}
+
+		var md5Base64 string
+		if opts.SendContentMd5 {
+			// Calculate md5sum.
+			hash := c.md5Hasher()
+			hash.Write(buf[:length])
+			md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil))
+			hash.Close()
+		}
+
+		// Update progress reader appropriately to the latest offset
+		// as we read from the source.
+		rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
+
+		// Proceed to upload the part.
+		objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
+			md5Base64, "", int64(length), opts.ServerSideEncryption)
+		if uerr != nil {
+			return UploadInfo{}, uerr
+		}
+
+		// Save successfully uploaded part metadata.
+		partsInfo[partNumber] = objPart
+
+		// Save successfully uploaded size.
+		totalUploadedSize += int64(length)
+
+		// Increment part number.
+		partNumber++
+
+		// For an unknown size, we break away on read EOF; we do not
+		// have to upload all totalPartsCount parts.
+		if rerr == io.EOF {
+			break
+		}
+	}
+
+	// Loop over total uploaded parts to save them in
+	// Parts array before completing the multipart request.
+	for i := 1; i < partNumber; i++ {
+		part, ok := partsInfo[i]
+		if !ok {
+			return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+		}
+		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+			ETag:       part.ETag,
+			PartNumber: part.PartNumber,
+		})
+	}
+
+	// Sort all completed parts.
+	sort.Sort(completedParts(complMultipartUpload.Parts))
+
+	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+
+	uploadInfo.Size = totalUploadedSize
+	return uploadInfo, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go
new file mode 100644
index 00000000..f21a72c9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-remove.go
@@ -0,0 +1,419 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"encoding/xml"
+	"io"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// RemoveBucket deletes the bucket name.
+//
+// All objects (including all object versions and delete markers)
+// in the bucket must be deleted before successfully attempting this request.
+func (c Client) RemoveBucket(ctx context.Context, bucketName string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+	// Execute DELETE on bucket.
+	resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+		bucketName:       bucketName,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusNoContent {
+			return httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+
+	// Remove the location from cache on a successful delete.
+	c.bucketLocCache.Delete(bucketName)
+
+	return nil
+}
+
+// AdvancedRemoveOptions intended for internal use by replication
+type AdvancedRemoveOptions struct {
+	ReplicationDeleteMarker bool
+	ReplicationStatus       ReplicationStatus
+	ReplicationMTime        time.Time
+	ReplicationRequest      bool
+}
+
+// RemoveObjectOptions represents options specified by the user for a RemoveObject call
+type RemoveObjectOptions struct {
+	GovernanceBypass bool
+	VersionID        string
+	Internal         AdvancedRemoveOptions
+}
+
+// RemoveObject removes an object from a bucket.
+func (c Client) RemoveObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return err
+	}
+
+	return c.removeObject(ctx, bucketName, objectName, opts)
+}
+
+func (c Client) removeObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error {
+	// Get resources properly escaped and lined up before
+	// using them in the http request.
+	urlValues := make(url.Values)
+
+	if opts.VersionID != "" {
+		urlValues.Set("versionId", opts.VersionID)
+	}
+
+	// Build headers.
+	headers := make(http.Header)
+
+	if opts.GovernanceBypass {
+		// Set the bypass governance retention header.
+		headers.Set(amzBypassGovernance, "true")
+	}
+	if opts.Internal.ReplicationDeleteMarker {
+		headers.Set(minIOBucketReplicationDeleteMarker, "true")
+	}
+	if !opts.Internal.ReplicationMTime.IsZero() {
+		headers.Set(minIOBucketSourceMTime, opts.Internal.ReplicationMTime.Format(time.RFC3339Nano))
+	}
+	if !opts.Internal.ReplicationStatus.Empty() {
+		headers.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus))
+	}
+	if opts.Internal.ReplicationRequest {
+		headers.Set(minIOBucketReplicationRequest, "")
+	}
+	// Execute DELETE on objectName.
+	resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		contentSHA256Hex: emptySHA256Hex,
+		queryValues:      urlValues,
+		customHeader:     headers,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		// If some unexpected error happened and max retry is reached, we want to let the client know.
+		if resp.StatusCode != http.StatusNoContent {
+			return httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+
+	// DeleteObject always responds with http '204' even for
+	// objects which do not exist. So no need to handle them
+	// specifically.
+	return nil
+}
+
+// RemoveObjectError - container of Multi Delete S3 API error
+type RemoveObjectError struct {
+	ObjectName string
+	VersionID  string
+	Err        error
+}
+
+// generateRemoveMultiObjectsRequest - generate the XML request for the multi-object delete request
+func generateRemoveMultiObjectsRequest(objects []ObjectInfo) []byte {
+	delObjects := []deleteObject{}
+	for _, obj := range objects {
+		delObjects = append(delObjects, deleteObject{
+			Key:       obj.Key,
+			VersionID: obj.VersionID,
+		})
+	}
+	xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: delObjects, Quiet: true})
+	return xmlBytes
+}
+
+// processRemoveMultiObjectsResponse - parse the response of the multi-object delete
+// web service and return the success/failure result status for each object
+func processRemoveMultiObjectsResponse(body io.Reader, objects []ObjectInfo, errorCh chan<- RemoveObjectError) {
+	// Parse multi delete XML response
+	rmResult := &deleteMultiObjectsResult{}
+	err := xmlDecoder(body, rmResult)
+	if err != nil {
+		errorCh <- RemoveObjectError{ObjectName: "", Err: err}
+		return
+	}
+
+	// Report deletions that returned an error.
+	for _, obj := range rmResult.UnDeletedObjects {
+		// A non-existent version is not an error; ignore and continue.
+		switch obj.Code {
+		case "InvalidArgument", "NoSuchVersion":
+			continue
+		}
+		errorCh <- RemoveObjectError{
+			ObjectName: obj.Key,
+			VersionID:  obj.VersionID,
+			Err: ErrorResponse{
+				Code:    obj.Code,
+				Message: obj.Message,
+			},
+		}
+	}
+}
+
+// RemoveObjectsOptions represents options specified by the user for a RemoveObjects call
+type RemoveObjectsOptions struct {
+	GovernanceBypass bool
+}
+
+// RemoveObjects removes multiple objects from a bucket; specific object
+// versions may be specified through the entries received from objectsCh.
+// Removal failures are sent back via the returned error channel.
+func (c Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectError {
+	errorCh := make(chan RemoveObjectError, 1)
+
+	// Validate the bucket name.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		defer close(errorCh)
+		errorCh <- RemoveObjectError{
+			Err: err,
+		}
+		return errorCh
+	}
+	// Validate that the objects channel is properly allocated.
+	if objectsCh == nil {
+		defer close(errorCh)
+		errorCh <- RemoveObjectError{
+			Err: errInvalidArgument("Objects channel cannot be nil"),
+		}
+		return errorCh
+	}
+
+	go c.removeObjects(ctx, bucketName, objectsCh, errorCh, opts)
+	return errorCh
+}
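+// Illustrative usage (the client `clnt`, bucket and keys are placeholders):
+// feed ObjectInfo entries into a channel and drain the returned error channel.
+//
+//	objectsCh := make(chan ObjectInfo)
+//	go func() {
+//		defer close(objectsCh)
+//		for _, key := range []string{"a.txt", "b.txt"} {
+//			objectsCh <- ObjectInfo{Key: key}
+//		}
+//	}()
+//	for rErr := range clnt.RemoveObjects(context.Background(), "mybucket",
+//		objectsCh, RemoveObjectsOptions{}) {
+//		log.Println("failed to remove", rErr.ObjectName, ":", rErr.Err)
+//	}
+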
+// Return true if the character is within the allowed characters in an XML 1.0 document.
+// The list of allowed characters can be found here: https://www.w3.org/TR/xml/#charsets
+func validXMLChar(r rune) (ok bool) {
+	return r == 0x09 ||
+		r == 0x0A ||
+		r == 0x0D ||
+		r >= 0x20 && r <= 0xD7FF ||
+		r >= 0xE000 && r <= 0xFFFD ||
+		r >= 0x10000 && r <= 0x10FFFF
+}
+
+func hasInvalidXMLChar(str string) bool {
+	for _, s := range str {
+		if !validXMLChar(s) {
+			return true
+		}
+	}
+	return false
+}
+
+// Generate and call MultiDelete S3 requests based on entries received from objectsCh
+func (c Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, errorCh chan<- RemoveObjectError, opts RemoveObjectsOptions) {
+	maxEntries := 1000
+	finish := false
+	urlValues := make(url.Values)
+	urlValues.Set("delete", "")
+
+	// Close the error channel when MultiDelete finishes.
+	defer close(errorCh)
+
+	// Loop over entries in batches of 1000 and call MultiDelete requests
+	for {
+		if finish {
+			break
+		}
+		count := 0
+		var batch []ObjectInfo
+
+		// Try to gather 1000 entries
+		for object := range objectsCh {
+			if hasInvalidXMLChar(object.Key) {
+				// Use single DELETE so the object name will be in the request URL instead of the multi-delete XML document.
+				err := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{
+					VersionID:        object.VersionID,
+					GovernanceBypass: opts.GovernanceBypass,
+				})
+				if err != nil {
+					// A non-existent version is not an error; ignore and continue.
+					switch ToErrorResponse(err).Code {
+					case "InvalidArgument", "NoSuchVersion":
+						continue
+					}
+					errorCh <- RemoveObjectError{
+						ObjectName: object.Key,
+						VersionID:  object.VersionID,
+						Err:        err,
+					}
+				}
+				continue
+			}
+
+			batch = append(batch, object)
+			if count++; count >= maxEntries {
+				break
+			}
+		}
+		if count == 0 {
+			// The Multi-Objects Delete API doesn't accept an empty object list; quit immediately.
+			break
+		}
+		if count < maxEntries {
+			// We didn't have 1000 entries, so this is the last batch.
+			finish = true
+		}
+
+		// Build headers.
+		headers := make(http.Header)
+		if opts.GovernanceBypass {
+			// Set the bypass governance retention header.
+			headers.Set(amzBypassGovernance, "true")
+		}
+
+		// Generate the remove multi-objects XML request.
+		removeBytes := generateRemoveMultiObjectsRequest(batch)
+		// Execute POST on the bucket to delete the batch of objects.
+		resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
+			bucketName:       bucketName,
+			queryValues:      urlValues,
+			contentBody:      bytes.NewReader(removeBytes),
+			contentLength:    int64(len(removeBytes)),
+			contentMD5Base64: sumMD5Base64(removeBytes),
+			contentSHA256Hex: sum256Hex(removeBytes),
+			customHeader:     headers,
+		})
+		if resp != nil {
+			if resp.StatusCode != http.StatusOK {
+				e := httpRespToErrorResponse(resp, bucketName, "")
+				errorCh <- RemoveObjectError{ObjectName: "", Err: e}
+			}
+		}
+		if err != nil {
+			for _, b := range batch {
+				errorCh <- RemoveObjectError{
+					ObjectName: b.Key,
+					VersionID:  b.VersionID,
+					Err:        err,
+				}
+			}
+			continue
+		}
+
+		// Process the multi-objects remove xml response.
+		processRemoveMultiObjectsResponse(resp.Body, batch, errorCh)
+
+		closeResponse(resp)
+	}
+}
+
+// RemoveIncompleteUpload aborts a partially uploaded object.
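+// Illustrative call (names are placeholders):
+//
+//	err := clnt.RemoveIncompleteUpload(context.Background(), "mybucket", "partial.bin")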
+func (c Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + // Find multipart upload ids of the object to be aborted. + uploadIDs, err := c.findUploadIDs(ctx, bucketName, objectName) + if err != nil { + return err + } + + for _, uploadID := range uploadIDs { + // abort incomplete multipart upload, based on the upload id passed. + err := c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) + if err != nil { + return err + } + } + + return nil +} + +// abortMultipartUpload aborts a multipart upload for the given +// uploadID, all previously uploaded parts are deleted. +func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploadId", uploadID) + + // Execute DELETE on multipart upload. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent { + // Abort has no response body, handle it for any errors. + var errorResponse ErrorResponse + switch resp.StatusCode { + case http.StatusNotFound: + // This is needed specifically for abort and it cannot + // be converged into default case. + errorResponse = ErrorResponse{ + Code: "NoSuchUpload", + Message: "The specified multipart upload does not exist.", + BucketName: bucketName, + Key: objectName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + Region: resp.Header.Get("x-amz-bucket-region"), + } + default: + return httpRespToErrorResponse(resp, bucketName, objectName) + } + return errorResponse + } + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go new file mode 100644 index 00000000..37ed97b7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go @@ -0,0 +1,361 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "encoding/xml" + "errors" + "io" + "reflect" + "time" +) + +// listAllMyBucketsResult container for listBuckets response. +type listAllMyBucketsResult struct { + // Container for one or more buckets. + Buckets struct { + Bucket []BucketInfo + } + Owner owner +} + +// owner container for bucket owner information. 
+type owner struct {
+	DisplayName string
+	ID          string
+}
+
+// CommonPrefix container for prefix response.
+type CommonPrefix struct {
+	Prefix string
+}
+
+// ListBucketV2Result container for listObjects response version 2.
+type ListBucketV2Result struct {
+	// A response can contain CommonPrefixes only if you have
+	// specified a delimiter.
+	CommonPrefixes []CommonPrefix
+	// Metadata about each object returned.
+	Contents  []ObjectInfo
+	Delimiter string
+
+	// Encoding type used to encode object keys in the response.
+	EncodingType string
+
+	// A flag that indicates whether or not ListObjects returned all of the results
+	// that satisfied the search criteria.
+	IsTruncated bool
+	MaxKeys     int64
+	Name        string
+
+	// Hold the token that will be sent in the next request to fetch the next group of keys
+	NextContinuationToken string
+
+	ContinuationToken string
+	Prefix            string
+
+	// FetchOwner and StartAfter are currently not used
+	FetchOwner string
+	StartAfter string
+}
+
+// Version is an element in the list object versions response
+type Version struct {
+	ETag         string
+	IsLatest     bool
+	Key          string
+	LastModified time.Time
+	Owner        Owner
+	Size         int64
+	StorageClass string
+	VersionID    string `xml:"VersionId"`
+
+	isDeleteMarker bool
+}
+
+// ListVersionsResult is an element in the list object versions response
+// and has a special Unmarshaler because we need to preserve the order
+// of <Version> and <DeleteMarker> entries in the ListVersionsResult.Versions slice
+type ListVersionsResult struct {
+	Versions []Version
+
+	CommonPrefixes      []CommonPrefix
+	Name                string
+	Prefix              string
+	Delimiter           string
+	MaxKeys             int64
+	EncodingType        string
+	IsTruncated         bool
+	KeyMarker           string
+	VersionIDMarker     string
+	NextKeyMarker       string
+	NextVersionIDMarker string
+}
+
+// UnmarshalXML is custom unmarshal code for the response of ListObjectVersions;
+// it unmarshals <Version> and <DeleteMarker> tags and saves them in the Versions
+// field to preserve the lexical order of the listing.
+func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
+	for {
+		// Read tokens from the XML document in a stream.
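+		// Walking the token stream by hand (instead of relying on
+		// struct tags) lets us append Version and DeleteMarker
+		// elements to the single Versions slice in exactly the order
+		// the server listed them.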
+		t, err := d.Token()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+
+		switch se := t.(type) {
+		case xml.StartElement:
+			tagName := se.Name.Local
+			switch tagName {
+			case "Name", "Prefix",
+				"Delimiter", "EncodingType",
+				"KeyMarker", "NextKeyMarker":
+				var s string
+				if err = d.DecodeElement(&s, &se); err != nil {
+					return err
+				}
+				v := reflect.ValueOf(l).Elem().FieldByName(tagName)
+				if v.IsValid() {
+					v.SetString(s)
+				}
+			case "VersionIdMarker":
+				// VersionIdMarker is a special case because of 'Id' instead of 'ID' in the field name
+				var s string
+				if err = d.DecodeElement(&s, &se); err != nil {
+					return err
+				}
+				l.VersionIDMarker = s
+			case "NextVersionIdMarker":
+				// NextVersionIdMarker is a special case because of 'Id' instead of 'ID' in the field name
+				var s string
+				if err = d.DecodeElement(&s, &se); err != nil {
+					return err
+				}
+				l.NextVersionIDMarker = s
+			case "IsTruncated": // bool
+				var b bool
+				if err = d.DecodeElement(&b, &se); err != nil {
+					return err
+				}
+				l.IsTruncated = b
+			case "MaxKeys": // int64
+				var i int64
+				if err = d.DecodeElement(&i, &se); err != nil {
+					return err
+				}
+				l.MaxKeys = i
+			case "CommonPrefixes":
+				var cp CommonPrefix
+				if err = d.DecodeElement(&cp, &se); err != nil {
+					return err
+				}
+				l.CommonPrefixes = append(l.CommonPrefixes, cp)
+			case "DeleteMarker", "Version":
+				var v Version
+				if err = d.DecodeElement(&v, &se); err != nil {
+					return err
+				}
+				if tagName == "DeleteMarker" {
+					v.isDeleteMarker = true
+				}
+				l.Versions = append(l.Versions, v)
+			default:
+				return errors.New("unrecognized option: " + tagName)
+			}
+		}
+	}
+	return nil
+}
+
+// ListBucketResult container for listObjects response.
+type ListBucketResult struct {
+	// A response can contain CommonPrefixes only if you have
+	// specified a delimiter.
+	CommonPrefixes []CommonPrefix
+	// Metadata about each object returned.
+	Contents  []ObjectInfo
+	Delimiter string
+
+	// Encoding type used to encode object keys in the response.
+	EncodingType string
+
+	// A flag that indicates whether or not ListObjects returned all of the results
+	// that satisfied the search criteria.
+	IsTruncated bool
+	Marker      string
+	MaxKeys     int64
+	Name        string
+
+	// When the response is truncated (the IsTruncated element value in
+	// the response is true), you can use the key name in this field
+	// as a marker in the subsequent request to get the next set of objects.
+	// Object storage lists objects in alphabetical order. Note: this
+	// element is returned only if you have the delimiter request
+	// parameter specified. If the response does not include NextMarker
+	// and it is truncated, you can use the value of the last Key in
+	// the response as the marker in the subsequent request to get the
+	// next set of object keys.
+	NextMarker string
+	Prefix     string
+}
+
+// ListMultipartUploadsResult container for ListMultipartUploads response
+type ListMultipartUploadsResult struct {
+	Bucket             string
+	KeyMarker          string
+	UploadIDMarker     string `xml:"UploadIdMarker"`
+	NextKeyMarker      string
+	NextUploadIDMarker string `xml:"NextUploadIdMarker"`
+	EncodingType       string
+	MaxUploads         int64
+	IsTruncated        bool
+	Uploads            []ObjectMultipartInfo `xml:"Upload"`
+	Prefix             string
+	Delimiter          string
+	// A response can contain CommonPrefixes only if you specify a delimiter.
+	CommonPrefixes []CommonPrefix
+}
+
+// initiator container for who initiated multipart upload.
+type initiator struct {
+	ID          string
+	DisplayName string
+}
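+// A small illustrative sketch (not part of the library) of what the custom
+// decoder above guarantees: interleaved <Version> and <DeleteMarker>
+// elements are decoded into a single slice in listing order.
+//
+//	payload := `<ListVersionsResult>` +
+//		`<Version><Key>a</Key></Version>` +
+//		`<DeleteMarker><Key>a</Key></DeleteMarker>` +
+//		`<Version><Key>b</Key></Version>` +
+//		`</ListVersionsResult>`
+//	var res ListVersionsResult
+//	if err := xml.Unmarshal([]byte(payload), &res); err != nil {
+//		log.Fatalln(err)
+//	}
+//	// res.Versions now holds a, a (delete marker), b - in listing order.
+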
+// copyObjectResult container for copy object response.
+type copyObjectResult struct {
+	ETag         string
+	LastModified time.Time // time string format "2006-01-02T15:04:05.000Z"
+}
+
+// ObjectPart container for particular part of an object.
+type ObjectPart struct {
+	// Part number identifies the part.
+	PartNumber int
+
+	// Date and time the part was uploaded.
+	LastModified time.Time
+
+	// Entity tag returned when the part was uploaded, usually md5sum
+	// of the part.
+	ETag string
+
+	// Size of the uploaded part data.
+	Size int64
+}
+
+// ListObjectPartsResult container for ListObjectParts response.
+type ListObjectPartsResult struct {
+	Bucket   string
+	Key      string
+	UploadID string `xml:"UploadId"`
+
+	Initiator initiator
+	Owner     owner
+
+	StorageClass         string
+	PartNumberMarker     int
+	NextPartNumberMarker int
+	MaxParts             int
+
+	// Indicates whether the returned list of parts is truncated.
+	IsTruncated bool
+	ObjectParts []ObjectPart `xml:"Part"`
+
+	EncodingType string
+}
+
+// initiateMultipartUploadResult container for InitiateMultiPartUpload
+// response.
+type initiateMultipartUploadResult struct {
+	Bucket   string
+	Key      string
+	UploadID string `xml:"UploadId"`
+}
+
+// completeMultipartUploadResult container for completed multipart
+// upload response.
+type completeMultipartUploadResult struct {
+	Location string
+	Bucket   string
+	Key      string
+	ETag     string
+}
+
+// CompletePart sub container lists individual part numbers and their
+// md5sum, part of completeMultipartUpload.
+type CompletePart struct {
+	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
+
+	// Part number identifies the part.
+	PartNumber int
+	ETag       string
+}
+
+// completeMultipartUpload container for completing multipart upload.
+type completeMultipartUpload struct {
+	XMLName xml.Name       `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
+	Parts   []CompletePart `xml:"Part"`
+}
+
+// createBucketConfiguration container for bucket configuration.
+type createBucketConfiguration struct {
+	XMLName  xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
+	Location string   `xml:"LocationConstraint"`
+}
+
+// deleteObject container for Delete element in MultiObjects Delete XML request
+type deleteObject struct {
+	Key       string
+	VersionID string `xml:"VersionId,omitempty"`
+}
+
+// deletedObject container for Deleted element in MultiObjects Delete XML response
+type deletedObject struct {
+	Key       string
+	VersionID string `xml:"VersionId,omitempty"`
+	// These fields are ignored.
+ DeleteMarker bool + DeleteMarkerVersionID string +} + +// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response +type nonDeletedObject struct { + Key string + Code string + Message string + VersionID string `xml:"VersionId"` +} + +// deletedMultiObjects container for MultiObjects Delete XML request +type deleteMultiObjects struct { + XMLName xml.Name `xml:"Delete"` + Quiet bool + Objects []deleteObject `xml:"Object"` +} + +// deletedMultiObjectsResult container for MultiObjects Delete XML response +type deleteMultiObjectsResult struct { + XMLName xml.Name `xml:"DeleteResult"` + DeletedObjects []deletedObject `xml:"Deleted"` + UnDeletedObjects []nonDeletedObject `xml:"Error"` +} diff --git a/vendor/github.com/minio/minio-go/v7/api-select.go b/vendor/github.com/minio/minio-go/v7/api-select.go new file mode 100644 index 00000000..e35cf02b --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-select.go @@ -0,0 +1,751 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2018-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/xml" + "errors" + "fmt" + "hash" + "hash/crc32" + "io" + "net/http" + "net/url" + "strings" + + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// CSVFileHeaderInfo - is the parameter for whether to utilize headers. +type CSVFileHeaderInfo string + +// Constants for file header info. +const ( + CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE" + CSVFileHeaderInfoIgnore = "IGNORE" + CSVFileHeaderInfoUse = "USE" +) + +// SelectCompressionType - is the parameter for what type of compression is +// present +type SelectCompressionType string + +// Constants for compression types under select API. +const ( + SelectCompressionNONE SelectCompressionType = "NONE" + SelectCompressionGZIP = "GZIP" + SelectCompressionBZIP = "BZIP2" +) + +// CSVQuoteFields - is the parameter for how CSV fields are quoted. +type CSVQuoteFields string + +// Constants for csv quote styles. +const ( + CSVQuoteFieldsAlways CSVQuoteFields = "Always" + CSVQuoteFieldsAsNeeded = "AsNeeded" +) + +// QueryExpressionType - is of what syntax the expression is, this should only +// be SQL +type QueryExpressionType string + +// Constants for expression type. +const ( + QueryExpressionTypeSQL QueryExpressionType = "SQL" +) + +// JSONType determines json input serialization type. +type JSONType string + +// Constants for JSONTypes. 
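+// DOCUMENT treats the input as a single JSON document that may span
+// multiple lines; LINES treats the input as newline-delimited JSON records.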
+const ( + JSONDocumentType JSONType = "DOCUMENT" + JSONLinesType = "LINES" +) + +// ParquetInputOptions parquet input specific options +type ParquetInputOptions struct{} + +// CSVInputOptions csv input specific options +type CSVInputOptions struct { + FileHeaderInfo CSVFileHeaderInfo + fileHeaderInfoSet bool + + RecordDelimiter string + recordDelimiterSet bool + + FieldDelimiter string + fieldDelimiterSet bool + + QuoteCharacter string + quoteCharacterSet bool + + QuoteEscapeCharacter string + quoteEscapeCharacterSet bool + + Comments string + commentsSet bool +} + +// SetFileHeaderInfo sets the file header info in the CSV input options +func (c *CSVInputOptions) SetFileHeaderInfo(val CSVFileHeaderInfo) { + c.FileHeaderInfo = val + c.fileHeaderInfoSet = true +} + +// SetRecordDelimiter sets the record delimiter in the CSV input options +func (c *CSVInputOptions) SetRecordDelimiter(val string) { + c.RecordDelimiter = val + c.recordDelimiterSet = true +} + +// SetFieldDelimiter sets the field delimiter in the CSV input options +func (c *CSVInputOptions) SetFieldDelimiter(val string) { + c.FieldDelimiter = val + c.fieldDelimiterSet = true +} + +// SetQuoteCharacter sets the quote character in the CSV input options +func (c *CSVInputOptions) SetQuoteCharacter(val string) { + c.QuoteCharacter = val + c.quoteCharacterSet = true +} + +// SetQuoteEscapeCharacter sets the quote escape character in the CSV input options +func (c *CSVInputOptions) SetQuoteEscapeCharacter(val string) { + c.QuoteEscapeCharacter = val + c.quoteEscapeCharacterSet = true +} + +// SetComments sets the comments character in the CSV input options +func (c *CSVInputOptions) SetComments(val string) { + c.Comments = val + c.commentsSet = true +} + +// MarshalXML - produces the xml representation of the CSV input options struct +func (c CSVInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + if c.FileHeaderInfo != "" || c.fileHeaderInfoSet { + if err := e.EncodeElement(c.FileHeaderInfo, xml.StartElement{Name: xml.Name{Local: "FileHeaderInfo"}}); err != nil { + return err + } + } + + if c.RecordDelimiter != "" || c.recordDelimiterSet { + if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { + return err + } + } + + if c.FieldDelimiter != "" || c.fieldDelimiterSet { + if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil { + return err + } + } + + if c.QuoteCharacter != "" || c.quoteCharacterSet { + if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil { + return err + } + } + + if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet { + if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil { + return err + } + } + + if c.Comments != "" || c.commentsSet { + if err := e.EncodeElement(c.Comments, xml.StartElement{Name: xml.Name{Local: "Comments"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// CSVOutputOptions csv output specific options +type CSVOutputOptions struct { + QuoteFields CSVQuoteFields + quoteFieldsSet bool + + RecordDelimiter string + recordDelimiterSet bool + + FieldDelimiter string + fieldDelimiterSet bool + + QuoteCharacter string + quoteCharacterSet bool + + QuoteEscapeCharacter string + quoteEscapeCharacterSet 
bool +} + +// SetQuoteFields sets the quote field parameter in the CSV output options +func (c *CSVOutputOptions) SetQuoteFields(val CSVQuoteFields) { + c.QuoteFields = val + c.quoteFieldsSet = true +} + +// SetRecordDelimiter sets the record delimiter character in the CSV output options +func (c *CSVOutputOptions) SetRecordDelimiter(val string) { + c.RecordDelimiter = val + c.recordDelimiterSet = true +} + +// SetFieldDelimiter sets the field delimiter character in the CSV output options +func (c *CSVOutputOptions) SetFieldDelimiter(val string) { + c.FieldDelimiter = val + c.fieldDelimiterSet = true +} + +// SetQuoteCharacter sets the quote character in the CSV output options +func (c *CSVOutputOptions) SetQuoteCharacter(val string) { + c.QuoteCharacter = val + c.quoteCharacterSet = true +} + +// SetQuoteEscapeCharacter sets the quote escape character in the CSV output options +func (c *CSVOutputOptions) SetQuoteEscapeCharacter(val string) { + c.QuoteEscapeCharacter = val + c.quoteEscapeCharacterSet = true +} + +// MarshalXML - produces the xml representation of the CSVOutputOptions struct +func (c CSVOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + + if c.QuoteFields != "" || c.quoteFieldsSet { + if err := e.EncodeElement(c.QuoteFields, xml.StartElement{Name: xml.Name{Local: "QuoteFields"}}); err != nil { + return err + } + } + + if c.RecordDelimiter != "" || c.recordDelimiterSet { + if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { + return err + } + } + + if c.FieldDelimiter != "" || c.fieldDelimiterSet { + if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil { + return err + } + } + + if c.QuoteCharacter != "" || c.quoteCharacterSet { + if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil { + return err + } + } + + if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet { + if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// JSONInputOptions json input specific options +type JSONInputOptions struct { + Type JSONType + typeSet bool +} + +// SetType sets the JSON type in the JSON input options +func (j *JSONInputOptions) SetType(typ JSONType) { + j.Type = typ + j.typeSet = true +} + +// MarshalXML - produces the xml representation of the JSONInputOptions struct +func (j JSONInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + + if j.Type != "" || j.typeSet { + if err := e.EncodeElement(j.Type, xml.StartElement{Name: xml.Name{Local: "Type"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// JSONOutputOptions - json output specific options +type JSONOutputOptions struct { + RecordDelimiter string + recordDelimiterSet bool +} + +// SetRecordDelimiter sets the record delimiter in the JSON output options +func (j *JSONOutputOptions) SetRecordDelimiter(val string) { + j.RecordDelimiter = val + j.recordDelimiterSet = true +} + +// MarshalXML - produces the xml representation of the JSONOutputOptions struct +func (j JSONOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := 
e.EncodeToken(start); err != nil { + return err + } + + if j.RecordDelimiter != "" || j.recordDelimiterSet { + if err := e.EncodeElement(j.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// SelectObjectInputSerialization - input serialization parameters +type SelectObjectInputSerialization struct { + CompressionType SelectCompressionType + Parquet *ParquetInputOptions `xml:"Parquet,omitempty"` + CSV *CSVInputOptions `xml:"CSV,omitempty"` + JSON *JSONInputOptions `xml:"JSON,omitempty"` +} + +// SelectObjectOutputSerialization - output serialization parameters. +type SelectObjectOutputSerialization struct { + CSV *CSVOutputOptions `xml:"CSV,omitempty"` + JSON *JSONOutputOptions `xml:"JSON,omitempty"` +} + +// SelectObjectOptions - represents the input select body +type SelectObjectOptions struct { + XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"` + ServerSideEncryption encrypt.ServerSide `xml:"-"` + Expression string + ExpressionType QueryExpressionType + InputSerialization SelectObjectInputSerialization + OutputSerialization SelectObjectOutputSerialization + RequestProgress struct { + Enabled bool + } +} + +// Header returns the http.Header representation of the SelectObject options. +func (o SelectObjectOptions) Header() http.Header { + headers := make(http.Header) + if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { + o.ServerSideEncryption.Marshal(headers) + } + return headers +} + +// SelectObjectType - is the parameter which defines what type of object the +// operation is being performed on. +type SelectObjectType string + +// Constants for input data types. +const ( + SelectObjectTypeCSV SelectObjectType = "CSV" + SelectObjectTypeJSON = "JSON" + SelectObjectTypeParquet = "Parquet" +) + +// preludeInfo is used for keeping track of necessary information from the +// prelude. +type preludeInfo struct { + totalLen uint32 + headerLen uint32 +} + +// SelectResults is used for the streaming responses from the server. +type SelectResults struct { + pipeReader *io.PipeReader + resp *http.Response + stats *StatsMessage + progress *ProgressMessage +} + +// ProgressMessage is a struct for progress xml message. +type ProgressMessage struct { + XMLName xml.Name `xml:"Progress" json:"-"` + StatsMessage +} + +// StatsMessage is a struct for stat xml message. +type StatsMessage struct { + XMLName xml.Name `xml:"Stats" json:"-"` + BytesScanned int64 + BytesProcessed int64 + BytesReturned int64 +} + +// messageType represents the type of message. +type messageType string + +const ( + errorMsg messageType = "error" + commonMsg = "event" +) + +// eventType represents the type of event. +type eventType string + +// list of event-types returned by Select API. +const ( + endEvent eventType = "End" + recordsEvent = "Records" + progressEvent = "Progress" + statsEvent = "Stats" +) + +// contentType represents content type of event. +type contentType string + +const ( + xmlContent contentType = "text/xml" +) + +// SelectObjectContent is a implementation of http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html AWS S3 API. +func (c Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) { + // Input validation. 
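+    // A typical call looks roughly like the following (illustrative
+    // sketch only, not part of the upstream change; "bucket", "data.csv"
+    // and the client variable are placeholders):
+    //
+    //    opts := SelectObjectOptions{
+    //        Expression:     "select * from s3object",
+    //        ExpressionType: QueryExpressionTypeSQL,
+    //        InputSerialization: SelectObjectInputSerialization{
+    //            CSV: &CSVInputOptions{},
+    //        },
+    //        OutputSerialization: SelectObjectOutputSerialization{
+    //            CSV: &CSVOutputOptions{},
+    //        },
+    //    }
+    //    res, _ := client.SelectObjectContent(ctx, "bucket", "data.csv", opts)
+    //    defer res.Close()
+    //    io.Copy(os.Stdout, res) // records stream through the Read method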
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + + selectReqBytes, err := xml.Marshal(opts) + if err != nil { + return nil, err + } + + urlValues := make(url.Values) + urlValues.Set("select", "") + urlValues.Set("select-type", "2") + + // Execute POST on bucket/object. + resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: opts.Header(), + contentMD5Base64: sumMD5Base64(selectReqBytes), + contentSHA256Hex: sum256Hex(selectReqBytes), + contentBody: bytes.NewReader(selectReqBytes), + contentLength: int64(len(selectReqBytes)), + }) + if err != nil { + return nil, err + } + + return NewSelectResults(resp, bucketName) +} + +// NewSelectResults creates a Select Result parser that parses the response +// and returns a Reader that will return parsed and assembled select output. +func NewSelectResults(resp *http.Response, bucketName string) (*SelectResults, error) { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, "") + } + + pipeReader, pipeWriter := io.Pipe() + streamer := &SelectResults{ + resp: resp, + stats: &StatsMessage{}, + progress: &ProgressMessage{}, + pipeReader: pipeReader, + } + streamer.start(pipeWriter) + return streamer, nil +} + +// Close - closes the underlying response body and the stream reader. +func (s *SelectResults) Close() error { + defer closeResponse(s.resp) + return s.pipeReader.Close() +} + +// Read - is a reader compatible implementation for SelectObjectContent records. +func (s *SelectResults) Read(b []byte) (n int, err error) { + return s.pipeReader.Read(b) +} + +// Stats - information about a request's stats when processing is complete. +func (s *SelectResults) Stats() *StatsMessage { + return s.stats +} + +// Progress - information about the progress of a request. +func (s *SelectResults) Progress() *ProgressMessage { + return s.progress +} + +// start is the main function that decodes the large byte array into +// several events that are sent through the eventstream. +func (s *SelectResults) start(pipeWriter *io.PipeWriter) { + go func() { + for { + var prelude preludeInfo + var headers = make(http.Header) + var err error + + // Create CRC code + crc := crc32.New(crc32.IEEETable) + crcReader := io.TeeReader(s.resp.Body, crc) + + // Extract the prelude(12 bytes) into a struct to extract relevant information. + prelude, err = processPrelude(crcReader, crc) + if err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + + // Extract the headers(variable bytes) into a struct to extract relevant information + if prelude.headerLen > 0 { + if err = extractHeader(io.LimitReader(crcReader, int64(prelude.headerLen)), headers); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + } + + // Get the actual payload length so that the appropriate amount of + // bytes can be read or parsed. + payloadLen := prelude.PayloadLen() + + m := messageType(headers.Get("message-type")) + + switch m { + case errorMsg: + pipeWriter.CloseWithError(errors.New(headers.Get("error-code") + ":\"" + headers.Get("error-message") + "\"")) + closeResponse(s.resp) + return + case commonMsg: + // Get content-type of the payload. + c := contentType(headers.Get("content-type")) + + // Get event type of the payload. 
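+                // Event types on this stream: Records carries raw payload
+                // bytes, Progress and Stats carry XML documents, and End
+                // marks the final message of the response.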
+                e := eventType(headers.Get("event-type"))
+
+                // Handle all supported events.
+                switch e {
+                case endEvent:
+                    pipeWriter.Close()
+                    closeResponse(s.resp)
+                    return
+                case recordsEvent:
+                    if _, err = io.Copy(pipeWriter, io.LimitReader(crcReader, payloadLen)); err != nil {
+                        pipeWriter.CloseWithError(err)
+                        closeResponse(s.resp)
+                        return
+                    }
+                case progressEvent:
+                    switch c {
+                    case xmlContent:
+                        if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.progress); err != nil {
+                            pipeWriter.CloseWithError(err)
+                            closeResponse(s.resp)
+                            return
+                        }
+                    default:
+                        pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, progressEvent))
+                        closeResponse(s.resp)
+                        return
+                    }
+                case statsEvent:
+                    switch c {
+                    case xmlContent:
+                        if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.stats); err != nil {
+                            pipeWriter.CloseWithError(err)
+                            closeResponse(s.resp)
+                            return
+                        }
+                    default:
+                        pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, statsEvent))
+                        closeResponse(s.resp)
+                        return
+                    }
+                }
+            }
+
+            // Ensures that the full message's CRC is correct and
+            // that the message is not corrupted
+            if err := checkCRC(s.resp.Body, crc.Sum32()); err != nil {
+                pipeWriter.CloseWithError(err)
+                closeResponse(s.resp)
+                return
+            }
+
+        }
+    }()
+}
+
+// PayloadLen is a function that calculates the length of the payload.
+func (p preludeInfo) PayloadLen() int64 {
+    return int64(p.totalLen - p.headerLen - 16)
+}
+
+// processPrelude is the function that reads the 12 bytes of the prelude and
+// ensures the CRC is correct while also extracting relevant information into
+// the struct.
+func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) {
+    var err error
+    var pInfo = preludeInfo{}
+
+    // reads total length of the message (first 4 bytes)
+    pInfo.totalLen, err = extractUint32(prelude)
+    if err != nil {
+        return pInfo, err
+    }
+
+    // reads total header length of the message (2nd 4 bytes)
+    pInfo.headerLen, err = extractUint32(prelude)
+    if err != nil {
+        return pInfo, err
+    }
+
+    // checks that the CRC is correct (3rd 4 bytes)
+    preCRC := crc.Sum32()
+    if err := checkCRC(prelude, preCRC); err != nil {
+        return pInfo, err
+    }
+
+    return pInfo, nil
+}
+
+// extracts the relevant information from the Headers.
+func extractHeader(body io.Reader, myHeaders http.Header) error {
+    for {
+        // extracts the first part of the header, the header type name.
+        headerTypeName, err := extractHeaderType(body)
+        if err != nil {
+            // Since end of file, we have read all of our headers
+            if err == io.EOF {
+                break
+            }
+            return err
+        }
+
+        // reads the one-byte header value type (always 7, i.e. string) and ignores it.
+        if _, err := extractUint8(body); err != nil {
+            return err
+        }
+
+        headerValueName, err := extractHeaderValue(body)
+        if err != nil {
+            return err
+        }
+
+        myHeaders.Set(headerTypeName, headerValueName)
+
+    }
+    return nil
+}
+
+// extractHeaderType extracts the first half of the header message, the header type.
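+// On the wire each header is encoded as a 1-byte name length, the name
+// itself, a 1-byte value type (7, i.e. string), a 2-byte value length,
+// and the value bytes; this function and extractHeaderValue read the two halves.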
+func extractHeaderType(body io.Reader) (string, error) {
+    // extracts the 1-byte header name length
+    headerNameLen, err := extractUint8(body)
+    if err != nil {
+        return "", err
+    }
+    // extracts the string with the appropriate number of bytes
+    headerName, err := extractString(body, int(headerNameLen))
+    if err != nil {
+        return "", err
+    }
+    return strings.TrimPrefix(headerName, ":"), nil
+}
+
+// extractHeaderValue extracts the second half of the header message, the
+// header value
+func extractHeaderValue(body io.Reader) (string, error) {
+    bodyLen, err := extractUint16(body)
+    if err != nil {
+        return "", err
+    }
+    bodyName, err := extractString(body, int(bodyLen))
+    if err != nil {
+        return "", err
+    }
+    return bodyName, nil
+}
+
+// extracts a string from byte array of a particular number of bytes.
+func extractString(source io.Reader, lenBytes int) (string, error) {
+    myVal := make([]byte, lenBytes)
+    // readFull guards against short reads from the underlying stream.
+    _, err := readFull(source, myVal)
+    if err != nil {
+        return "", err
+    }
+    return string(myVal), nil
+}
+
+// extractUint32 extracts a 4 byte integer from the byte array.
+func extractUint32(r io.Reader) (uint32, error) {
+    buf := make([]byte, 4)
+    _, err := readFull(r, buf)
+    if err != nil {
+        return 0, err
+    }
+    return binary.BigEndian.Uint32(buf), nil
+}
+
+// extractUint16 extracts a 2 byte integer from the byte array.
+func extractUint16(r io.Reader) (uint16, error) {
+    buf := make([]byte, 2)
+    _, err := readFull(r, buf)
+    if err != nil {
+        return 0, err
+    }
+    return binary.BigEndian.Uint16(buf), nil
+}
+
+// extractUint8 extracts a 1 byte integer from the byte array.
+func extractUint8(r io.Reader) (uint8, error) {
+    buf := make([]byte, 1)
+    _, err := readFull(r, buf)
+    if err != nil {
+        return 0, err
+    }
+    return buf[0], nil
+}
+
+// checkCRC ensures that the CRC matches with the one from the reader.
+func checkCRC(r io.Reader, expect uint32) error {
+    msgCRC, err := extractUint32(r)
+    if err != nil {
+        return err
+    }
+
+    if msgCRC != expect {
+        return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X", msgCRC, expect)
+    }
+    return nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-stat.go b/vendor/github.com/minio/minio-go/v7/api-stat.go
new file mode 100644
index 00000000..aa81cc43
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-stat.go
@@ -0,0 +1,127 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+    "context"
+    "net/http"
+    "net/url"
+
+    "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// BucketExists verifies if bucket exists and you have permission to access it. Allows for a Context to
+// control cancellations and timeouts.
+func (c Client) BucketExists(ctx context.Context, bucketName string) (bool, error) {
+    // Input validation.
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+        return false, err
+    }
+
+    // Execute HEAD on bucketName.
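+    // A 200 response means the bucket exists and is accessible; a
+    // NoSuchBucket error is mapped to (false, nil) below.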
+ resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{ + bucketName: bucketName, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + if ToErrorResponse(err).Code == "NoSuchBucket" { + return false, nil + } + return false, err + } + if resp != nil { + resperr := httpRespToErrorResponse(resp, bucketName, "") + if ToErrorResponse(resperr).Code == "NoSuchBucket" { + return false, nil + } + if resp.StatusCode != http.StatusOK { + return false, httpRespToErrorResponse(resp, bucketName, "") + } + } + return true, nil +} + +// StatObject verifies if object exists and you have permission to access. +func (c Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ObjectInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return ObjectInfo{}, err + } + return c.statObject(ctx, bucketName, objectName, opts) +} + +// Lower level API for statObject supporting pre-conditions and range headers. +func (c Client) statObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ObjectInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return ObjectInfo{}, err + } + headers := opts.Header() + if opts.Internal.ReplicationDeleteMarker { + headers.Set(minIOBucketReplicationDeleteMarker, "true") + } + + urlValues := make(url.Values) + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + // Execute HEAD on objectName. + resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + customHeader: headers, + }) + defer closeResponse(resp) + if err != nil { + return ObjectInfo{}, err + } + deleteMarker := resp.Header.Get(amzDeleteMarker) == "true" + + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { + if resp.StatusCode == http.StatusBadRequest && opts.VersionID != "" && deleteMarker { + errResp := ErrorResponse{ + StatusCode: resp.StatusCode, + Code: "MethodNotAllowed", + Message: "The specified method is not allowed against this resource.", + BucketName: bucketName, + Key: objectName, + } + return ObjectInfo{ + VersionID: resp.Header.Get(amzVersionID), + IsDeleteMarker: deleteMarker, + }, errResp + } + return ObjectInfo{ + VersionID: resp.Header.Get(amzVersionID), + IsDeleteMarker: deleteMarker, + }, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + return ToObjectInfo(bucketName, objectName, resp.Header) +} diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go new file mode 100644 index 00000000..44660ab6 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -0,0 +1,896 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+    "bytes"
+    "context"
+    "errors"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "math/rand"
+    "net"
+    "net/http"
+    "net/http/cookiejar"
+    "net/http/httputil"
+    "net/url"
+    "os"
+    "runtime"
+    "strings"
+    "sync"
+    "time"
+
+    md5simd "github.com/minio/md5-simd"
+    "github.com/minio/minio-go/v7/pkg/credentials"
+    "github.com/minio/minio-go/v7/pkg/s3utils"
+    "github.com/minio/minio-go/v7/pkg/signer"
+    "golang.org/x/net/publicsuffix"
+)
+
+// Client implements Amazon S3 compatible methods.
+type Client struct {
+    /// Standard options.
+
+    // Parsed endpoint url provided by the user.
+    endpointURL *url.URL
+
+    // Holds various credential providers.
+    credsProvider *credentials.Credentials
+
+    // Custom signerType value overrides all credentials.
+    overrideSignerType credentials.SignatureType
+
+    // User supplied.
+    appInfo struct {
+        appName    string
+        appVersion string
+    }
+
+    // Indicate whether we are using https or not
+    secure bool
+
+    // Needs allocation.
+    httpClient     *http.Client
+    bucketLocCache *bucketLocationCache
+
+    // Advanced functionality.
+    isTraceEnabled  bool
+    traceErrorsOnly bool
+    traceOutput     io.Writer
+
+    // S3 specific accelerated endpoint.
+    s3AccelerateEndpoint string
+
+    // Region endpoint
+    region string
+
+    // Random seed.
+    random *rand.Rand
+
+    // lookup indicates type of url lookup supported by server. If not specified,
+    // default to Auto.
+    lookup BucketLookupType
+
+    // Factory for MD5 hash functions.
+    md5Hasher    func() md5simd.Hasher
+    sha256Hasher func() md5simd.Hasher
+}
+
+// Options for New method
+type Options struct {
+    Creds        *credentials.Credentials
+    Secure       bool
+    Transport    http.RoundTripper
+    Region       string
+    BucketLookup BucketLookupType
+
+    // Custom hash routines. Leave nil to use standard.
+    CustomMD5    func() md5simd.Hasher
+    CustomSHA256 func() md5simd.Hasher
+}
+
+// Global constants.
+const (
+    libraryName    = "minio-go"
+    libraryVersion = "v7.0.11"
+)
+
+// User Agent should always follow the below style.
+// Please open an issue to discuss any new changes here.
+//
+//   MinIO (OS; ARCH) LIB/VER APP/VER
+const (
+    libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
+    libraryUserAgent       = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
+)
+
+// BucketLookupType is type of url lookup supported by server.
+type BucketLookupType int
+
+// Different types of url lookup supported by the server. Initialized to BucketLookupAuto.
+const (
+    BucketLookupAuto BucketLookupType = iota
+    BucketLookupDNS
+    BucketLookupPath
+)
+
+// New - instantiate minio client with options
+func New(endpoint string, opts *Options) (*Client, error) {
+    if opts == nil {
+        return nil, errors.New("no options provided")
+    }
+    clnt, err := privateNew(endpoint, opts)
+    if err != nil {
+        return nil, err
+    }
+    // Google cloud storage should be set to signature V2, force it if not.
+    if s3utils.IsGoogleEndpoint(*clnt.endpointURL) {
+        clnt.overrideSignerType = credentials.SignatureV2
+    }
+    // If Amazon S3, set to signature v4.
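+    // (Amazon endpoints only accept AWS Signature Version 4; V2 remains
+    // solely for legacy-compatible endpoints such as Google cloud storage,
+    // handled above.)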
+ if s3utils.IsAmazonEndpoint(*clnt.endpointURL) { + clnt.overrideSignerType = credentials.SignatureV4 + } + + return clnt, nil +} + +// EndpointURL returns the URL of the S3 endpoint. +func (c *Client) EndpointURL() *url.URL { + endpoint := *c.endpointURL // copy to prevent callers from modifying internal state + return &endpoint +} + +// lockedRandSource provides protected rand source, implements rand.Source interface. +type lockedRandSource struct { + lk sync.Mutex + src rand.Source +} + +// Int63 returns a non-negative pseudo-random 63-bit integer as an int64. +func (r *lockedRandSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +// Seed uses the provided seed value to initialize the generator to a +// deterministic state. +func (r *lockedRandSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// Redirect requests by re signing the request. +func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error { + if len(via) >= 5 { + return errors.New("stopped after 5 redirects") + } + if len(via) == 0 { + return nil + } + lastRequest := via[len(via)-1] + var reAuth bool + for attr, val := range lastRequest.Header { + // if hosts do not match do not copy Authorization header + if attr == "Authorization" && req.Host != lastRequest.Host { + reAuth = true + continue + } + if _, ok := req.Header[attr]; !ok { + req.Header[attr] = val + } + } + + *c.endpointURL = *req.URL + + value, err := c.credsProvider.Get() + if err != nil { + return err + } + var ( + signerType = value.SignerType + accessKeyID = value.AccessKeyID + secretAccessKey = value.SecretAccessKey + sessionToken = value.SessionToken + region = c.region + ) + + // Custom signer set then override the behavior. + if c.overrideSignerType != credentials.SignatureDefault { + signerType = c.overrideSignerType + } + + // If signerType returned by credentials helper is anonymous, + // then do not sign regardless of signerType override. + if value.SignerType == credentials.SignatureAnonymous { + signerType = credentials.SignatureAnonymous + } + + if reAuth { + // Check if there is no region override, if not get it from the URL if possible. + if region == "" { + region = s3utils.GetRegionFromURL(*c.endpointURL) + } + switch { + case signerType.IsV2(): + return errors.New("signature V2 cannot support redirection") + case signerType.IsV4(): + signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region)) + } + } + return nil +} + +func privateNew(endpoint string, opts *Options) (*Client, error) { + // construct endpoint. + endpointURL, err := getEndpointURL(endpoint, opts.Secure) + if err != nil { + return nil, err + } + + // Initialize cookies to preserve server sent cookies if any and replay + // them upon each request. + jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) + if err != nil { + return nil, err + } + + // instantiate new Client. + clnt := new(Client) + + // Save the credentials. + clnt.credsProvider = opts.Creds + + // Remember whether we are using https or not + clnt.secure = opts.Secure + + // Save endpoint URL, user agent for future uses. + clnt.endpointURL = endpointURL + + transport := opts.Transport + if transport == nil { + transport, err = DefaultTransport(opts.Secure) + if err != nil { + return nil, err + } + } + + // Instantiate http client and bucket location cache. 
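+    // The CheckRedirect hook re-signs requests on redirect via
+    // redirectHeaders above, because the Authorization header cannot be
+    // blindly replayed against a different host.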
+    clnt.httpClient = &http.Client{
+        Jar:           jar,
+        Transport:     transport,
+        CheckRedirect: clnt.redirectHeaders,
+    }
+
+    // Sets custom region; if region is empty the bucket location cache is used automatically.
+    if opts.Region == "" {
+        opts.Region = s3utils.GetRegionFromURL(*clnt.endpointURL)
+    }
+    clnt.region = opts.Region
+
+    // Instantiate bucket location cache.
+    clnt.bucketLocCache = newBucketLocationCache()
+
+    // Introduce a new locked random seed.
+    clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())})
+
+    // Add default md5 hasher.
+    clnt.md5Hasher = opts.CustomMD5
+    clnt.sha256Hasher = opts.CustomSHA256
+    if clnt.md5Hasher == nil {
+        clnt.md5Hasher = newMd5Hasher
+    }
+    if clnt.sha256Hasher == nil {
+        clnt.sha256Hasher = newSHA256Hasher
+    }
+    // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined
+    // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints.
+    clnt.lookup = opts.BucketLookup
+    // Return.
+    return clnt, nil
+}
+
+// SetAppInfo - add application details to user agent.
+func (c *Client) SetAppInfo(appName string, appVersion string) {
+    // if app name and version not set, we do not set a new user agent.
+    if appName != "" && appVersion != "" {
+        c.appInfo.appName = appName
+        c.appInfo.appVersion = appVersion
+    }
+}
+
+// TraceOn - enable HTTP tracing.
+func (c *Client) TraceOn(outputStream io.Writer) {
+    // if outputStream is nil then default to os.Stdout.
+    if outputStream == nil {
+        outputStream = os.Stdout
+    }
+    // Sets a new output stream.
+    c.traceOutput = outputStream
+
+    // Enable tracing.
+    c.isTraceEnabled = true
+}
+
+// TraceErrorsOnlyOn - same as TraceOn, but only errors will be traced.
+func (c *Client) TraceErrorsOnlyOn(outputStream io.Writer) {
+    c.TraceOn(outputStream)
+    c.traceErrorsOnly = true
+}
+
+// TraceErrorsOnlyOff - Turns off the errors only tracing and everything will be traced after this call.
+// If all tracing needs to be turned off, call TraceOff().
+func (c *Client) TraceErrorsOnlyOff() {
+    c.traceErrorsOnly = false
+}
+
+// TraceOff - disable HTTP tracing.
+func (c *Client) TraceOff() {
+    // Disable tracing.
+    c.isTraceEnabled = false
+    c.traceErrorsOnly = false
+}
+
+// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your
+// requests. This feature is specific to S3; for all other endpoints this
+// function does nothing. To read further details on s3 transfer acceleration
+// please visit -
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
+    if s3utils.IsAmazonEndpoint(*c.endpointURL) {
+        c.s3AccelerateEndpoint = accelerateEndpoint
+    }
+}
+
+// Hash materials provides relevant initialized hash algo writers
+// based on the expected signature type.
+//
+// - For signature v4 request if the connection is insecure compute only sha256.
+// - For signature v4 request if the connection is secure compute only md5.
+// - For anonymous request compute md5.
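+//
+// The sha256 sum feeds the V4 payload hash on plain HTTP connections;
+// over TLS the payload can be sent as UNSIGNED-PAYLOAD, so only an md5
+// checksum is kept for content integrity when one is requested.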
+func (c *Client) hashMaterials(isMd5Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) { + hashSums = make(map[string][]byte) + hashAlgos = make(map[string]md5simd.Hasher) + if c.overrideSignerType.IsV4() { + if c.secure { + hashAlgos["md5"] = c.md5Hasher() + } else { + hashAlgos["sha256"] = c.sha256Hasher() + } + } else { + if c.overrideSignerType.IsAnonymous() { + hashAlgos["md5"] = c.md5Hasher() + } + } + if isMd5Requested { + hashAlgos["md5"] = c.md5Hasher() + } + return hashAlgos, hashSums +} + +// requestMetadata - is container for all the values to make a request. +type requestMetadata struct { + // If set newRequest presigns the URL. + presignURL bool + + // User supplied. + bucketName string + objectName string + queryValues url.Values + customHeader http.Header + expires int64 + + // Generated by our internal code. + bucketLocation string + contentBody io.Reader + contentLength int64 + contentMD5Base64 string // carries base64 encoded md5sum + contentSHA256Hex string // carries hex encoded sha256sum +} + +// dumpHTTP - dump HTTP request and response. +func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { + // Starts http dump. + _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------") + if err != nil { + return err + } + + // Filter out Signature field from Authorization header. + origAuth := req.Header.Get("Authorization") + if origAuth != "" { + req.Header.Set("Authorization", redactSignature(origAuth)) + } + + // Only display request header. + reqTrace, err := httputil.DumpRequestOut(req, false) + if err != nil { + return err + } + + // Write request to trace output. + _, err = fmt.Fprint(c.traceOutput, string(reqTrace)) + if err != nil { + return err + } + + // Only display response header. + var respTrace []byte + + // For errors we make sure to dump response body as well. + if resp.StatusCode != http.StatusOK && + resp.StatusCode != http.StatusPartialContent && + resp.StatusCode != http.StatusNoContent { + respTrace, err = httputil.DumpResponse(resp, true) + if err != nil { + return err + } + } else { + respTrace, err = httputil.DumpResponse(resp, false) + if err != nil { + return err + } + } + + // Write response to trace output. + _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n")) + if err != nil { + return err + } + + // Ends the http dump. + _, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------") + if err != nil { + return err + } + + // Returns success. + return nil +} + +// do - execute http request. +func (c Client) do(req *http.Request) (*http.Response, error) { + resp, err := c.httpClient.Do(req) + if err != nil { + // Handle this specifically for now until future Golang versions fix this issue properly. + if urlErr, ok := err.(*url.Error); ok { + if strings.Contains(urlErr.Err.Error(), "EOF") { + return nil, &url.Error{ + Op: urlErr.Op, + URL: urlErr.URL, + Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."), + } + } + } + return nil, err + } + + // Response cannot be non-nil, report error if thats the case. + if resp == nil { + msg := "Response is empty. 
" + reportIssue + return nil, errInvalidArgument(msg) + } + + // If trace is enabled, dump http request and response, + // except when the traceErrorsOnly enabled and the response's status code is ok + if c.isTraceEnabled && !(c.traceErrorsOnly && resp.StatusCode == http.StatusOK) { + err = c.dumpHTTP(req, resp) + if err != nil { + return nil, err + } + } + + return resp, nil +} + +// List of success status. +var successStatus = []int{ + http.StatusOK, + http.StatusNoContent, + http.StatusPartialContent, +} + +// executeMethod - instantiates a given method, and retries the +// request upon any error up to maxRetries attempts in a binomially +// delayed manner using a standard back off algorithm. +func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) { + var retryable bool // Indicates if request can be retried. + var bodySeeker io.Seeker // Extracted seeker from io.Reader. + var reqRetry = MaxRetry // Indicates how many times we can retry the request + + if metadata.contentBody != nil { + // Check if body is seekable then it is retryable. + bodySeeker, retryable = metadata.contentBody.(io.Seeker) + switch bodySeeker { + case os.Stdin, os.Stdout, os.Stderr: + retryable = false + } + // Retry only when reader is seekable + if !retryable { + reqRetry = 1 + } + + // Figure out if the body can be closed - if yes + // we will definitely close it upon the function + // return. + bodyCloser, ok := metadata.contentBody.(io.Closer) + if ok { + defer bodyCloser.Close() + } + } + + // Create cancel context to control 'newRetryTimer' go routine. + retryCtx, cancel := context.WithCancel(ctx) + + // Indicate to our routine to exit cleanly upon return. + defer cancel() + + for range c.newRetryTimer(retryCtx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) { + // Retry executes the following function body if request has an + // error until maxRetries have been exhausted, retry attempts are + // performed after waiting for a given period of time in a + // binomial fashion. + if retryable { + // Seek back to beginning for each attempt. + if _, err = bodySeeker.Seek(0, 0); err != nil { + // If seek failed, no need to retry. + return nil, err + } + } + + // Instantiate a new request. + var req *http.Request + req, err = c.newRequest(ctx, method, metadata) + if err != nil { + errResponse := ToErrorResponse(err) + if isS3CodeRetryable(errResponse.Code) { + continue // Retry. + } + return nil, err + } + + // Initiate the request. + res, err = c.do(req) + if err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return nil, err + } + + // Retry the request + continue + } + + // For any known successful http status, return quickly. + for _, httpStatus := range successStatus { + if httpStatus == res.StatusCode { + return res, nil + } + } + + // Read the body to be saved later. + errBodyBytes, err := ioutil.ReadAll(res.Body) + // res.Body should be closed + closeResponse(res) + if err != nil { + return nil, err + } + + // Save the body. + errBodySeeker := bytes.NewReader(errBodyBytes) + res.Body = ioutil.NopCloser(errBodySeeker) + + // For errors verify if its retryable otherwise fail quickly. + errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName)) + + // Save the body back again. + errBodySeeker.Seek(0, 0) // Seek back to starting point. 
+ res.Body = ioutil.NopCloser(errBodySeeker) + + // Bucket region if set in error response and the error + // code dictates invalid region, we can retry the request + // with the new region. + // + // Additionally we should only retry if bucketLocation and custom + // region is empty. + if c.region == "" { + switch errResponse.Code { + case "AuthorizationHeaderMalformed": + fallthrough + case "InvalidRegion": + fallthrough + case "AccessDenied": + if errResponse.Region == "" { + // Region is empty we simply return the error. + return res, err + } + // Region is not empty figure out a way to + // handle this appropriately. + if metadata.bucketName != "" { + // Gather Cached location only if bucketName is present. + if location, cachedOk := c.bucketLocCache.Get(metadata.bucketName); cachedOk && location != errResponse.Region { + c.bucketLocCache.Set(metadata.bucketName, errResponse.Region) + continue // Retry. + } + } else { + // This is for ListBuckets() fallback. + if errResponse.Region != metadata.bucketLocation { + // Retry if the error response has a different region + // than the request we just made. + metadata.bucketLocation = errResponse.Region + continue // Retry + } + } + } + } + + // Verify if error response code is retryable. + if isS3CodeRetryable(errResponse.Code) { + continue // Retry. + } + + // Verify if http status code is retryable. + if isHTTPStatusRetryable(res.StatusCode) { + continue // Retry. + } + + // For all other cases break out of the retry loop. + break + } + + // Return an error when retry is canceled or deadlined + if e := retryCtx.Err(); e != nil { + return nil, e + } + + return res, err +} + +// newRequest - instantiate a new HTTP request for a given method. +func (c Client) newRequest(ctx context.Context, method string, metadata requestMetadata) (req *http.Request, err error) { + // If no method is supplied default to 'POST'. + if method == "" { + method = http.MethodPost + } + + location := metadata.bucketLocation + if location == "" { + if metadata.bucketName != "" { + // Gather location only if bucketName is present. + location, err = c.getBucketLocation(ctx, metadata.bucketName) + if err != nil { + return nil, err + } + } + if location == "" { + location = getDefaultLocation(*c.endpointURL, c.region) + } + } + + // Look if target url supports virtual host. + // We explicitly disallow MakeBucket calls to not use virtual DNS style, + // since the resolution may fail. + isMakeBucket := (metadata.objectName == "" && method == http.MethodPut && len(metadata.queryValues) == 0) + isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) && !isMakeBucket + + // Construct a new target URL. + targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, + isVirtualHost, metadata.queryValues) + if err != nil { + return nil, err + } + + // Initialize a new HTTP request for the method. + req, err = http.NewRequestWithContext(ctx, method, targetURL.String(), nil) + if err != nil { + return nil, err + } + + // Get credentials from the configured credentials provider. + value, err := c.credsProvider.Get() + if err != nil { + return nil, err + } + + var ( + signerType = value.SignerType + accessKeyID = value.AccessKeyID + secretAccessKey = value.SecretAccessKey + sessionToken = value.SessionToken + ) + + // Custom signer set then override the behavior. 
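+    // (overrideSignerType is forced in New for Google and Amazon
+    // endpoints and takes precedence over what the credentials report.)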
+ if c.overrideSignerType != credentials.SignatureDefault { + signerType = c.overrideSignerType + } + + // If signerType returned by credentials helper is anonymous, + // then do not sign regardless of signerType override. + if value.SignerType == credentials.SignatureAnonymous { + signerType = credentials.SignatureAnonymous + } + + // Generate presign url if needed, return right here. + if metadata.expires != 0 && metadata.presignURL { + if signerType.IsAnonymous() { + return nil, errInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.") + } + if signerType.IsV2() { + // Presign URL with signature v2. + req = signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost) + } else if signerType.IsV4() { + // Presign URL with signature v4. + req = signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires) + } + return req, nil + } + + // Set 'User-Agent' header for the request. + c.setUserAgent(req) + + // Set all headers. + for k, v := range metadata.customHeader { + req.Header.Set(k, v[0]) + } + + // Go net/http notoriously closes the request body. + // - The request Body, if non-nil, will be closed by the underlying Transport, even on errors. + // This can cause underlying *os.File seekers to fail, avoid that + // by making sure to wrap the closer as a nop. + if metadata.contentLength == 0 { + req.Body = nil + } else { + req.Body = ioutil.NopCloser(metadata.contentBody) + } + + // Set incoming content-length. + req.ContentLength = metadata.contentLength + if req.ContentLength <= -1 { + // For unknown content length, we upload using transfer-encoding: chunked. + req.TransferEncoding = []string{"chunked"} + } + + // set md5Sum for content protection. + if len(metadata.contentMD5Base64) > 0 { + req.Header.Set("Content-Md5", metadata.contentMD5Base64) + } + + // For anonymous requests just return. + if signerType.IsAnonymous() { + return req, nil + } + + switch { + case signerType.IsV2(): + // Add signature version '2' authorization header. + req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) + case metadata.objectName != "" && metadata.queryValues == nil && method == http.MethodPut && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure: + // Streaming signature is used by default for a PUT object request. Additionally we also + // look if the initialized client is secure, if yes then we don't need to perform + // streaming signature. + req = signer.StreamingSignV4(req, accessKeyID, + secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC()) + default: + // Set sha256 sum for signature calculation only with signature version '4'. + shaHeader := unsignedPayload + if metadata.contentSHA256Hex != "" { + shaHeader = metadata.contentSHA256Hex + } + req.Header.Set("X-Amz-Content-Sha256", shaHeader) + + // Add signature version '4' authorization header. + req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location) + } + + // Return request. + return req, nil +} + +// set User agent. +func (c Client) setUserAgent(req *http.Request) { + req.Header.Set("User-Agent", libraryUserAgent) + if c.appInfo.appName != "" && c.appInfo.appVersion != "" { + req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) + } +} + +// makeTargetURL make a new target url. 
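+// Virtual-host style produces URLs of the form
+// https://bucket.s3.amazonaws.com/object, while path style produces
+// https://s3.amazonaws.com/bucket/object.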
+func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) { + host := c.endpointURL.Host + // For Amazon S3 endpoint, try to fetch location based endpoint. + if s3utils.IsAmazonEndpoint(*c.endpointURL) { + if c.s3AccelerateEndpoint != "" && bucketName != "" { + // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + // Disable transfer acceleration for non-compliant bucket names. + if strings.Contains(bucketName, ".") { + return nil, errTransferAccelerationBucket(bucketName) + } + // If transfer acceleration is requested set new host. + // For more details about enabling transfer acceleration read here. + // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + host = c.s3AccelerateEndpoint + } else { + // Do not change the host if the endpoint URL is a FIPS S3 endpoint. + if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) { + // Fetch new host based on the bucket location. + host = getS3Endpoint(bucketLocation) + } + } + } + + // Save scheme. + scheme := c.endpointURL.Scheme + + // Strip port 80 and 443 so we won't send these ports in Host header. + // The reason is that browsers and curl automatically remove :80 and :443 + // with the generated presigned urls, then a signature mismatch error. + if h, p, err := net.SplitHostPort(host); err == nil { + if scheme == "http" && p == "80" || scheme == "https" && p == "443" { + host = h + } + } + + urlStr := scheme + "://" + host + "/" + // Make URL only if bucketName is available, otherwise use the + // endpoint URL. + if bucketName != "" { + // If endpoint supports virtual host style use that always. + // Currently only S3 and Google Cloud Storage would support + // virtual host style. + if isVirtualHostStyle { + urlStr = scheme + "://" + bucketName + "." + host + "/" + if objectName != "" { + urlStr = urlStr + s3utils.EncodePath(objectName) + } + } else { + // If not fall back to using path style. + urlStr = urlStr + bucketName + "/" + if objectName != "" { + urlStr = urlStr + s3utils.EncodePath(objectName) + } + } + } + + // If there are any query values, add them to the end. + if len(queryValues) > 0 { + urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues) + } + + return url.Parse(urlStr) +} + +// returns true if virtual hosted style requests are to be used. +func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool { + if bucketName == "" { + return false + } + + if c.lookup == BucketLookupDNS { + return true + } + if c.lookup == BucketLookupPath { + return false + } + + // default to virtual only for Amazon/Google storage. In all other cases use + // path style requests + return s3utils.IsVirtualHostSupported(url, bucketName) +} diff --git a/vendor/github.com/minio/minio-go/v7/bucket-cache.go b/vendor/github.com/minio/minio-go/v7/bucket-cache.go new file mode 100644 index 00000000..156150f6 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/bucket-cache.go @@ -0,0 +1,253 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "net" + "net/http" + "net/url" + "path" + "sync" + + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/signer" +) + +// bucketLocationCache - Provides simple mechanism to hold bucket +// locations in memory. +type bucketLocationCache struct { + // mutex is used for handling the concurrent + // read/write requests for cache. + sync.RWMutex + + // items holds the cached bucket locations. + items map[string]string +} + +// newBucketLocationCache - Provides a new bucket location cache to be +// used internally with the client object. +func newBucketLocationCache() *bucketLocationCache { + return &bucketLocationCache{ + items: make(map[string]string), + } +} + +// Get - Returns a value of a given key if it exists. +func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) { + r.RLock() + defer r.RUnlock() + location, ok = r.items[bucketName] + return +} + +// Set - Will persist a value into cache. +func (r *bucketLocationCache) Set(bucketName string, location string) { + r.Lock() + defer r.Unlock() + r.items[bucketName] = location +} + +// Delete - Deletes a bucket name from cache. +func (r *bucketLocationCache) Delete(bucketName string) { + r.Lock() + defer r.Unlock() + delete(r.items, bucketName) +} + +// GetBucketLocation - get location for the bucket name from location cache, if not +// fetch freshly by making a new request. +func (c Client) GetBucketLocation(ctx context.Context, bucketName string) (string, error) { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + return c.getBucketLocation(ctx, bucketName) +} + +// getBucketLocation - Get location for the bucketName from location map cache, if not +// fetch freshly by making a new request. +func (c Client) getBucketLocation(ctx context.Context, bucketName string) (string, error) { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + + // Region set then no need to fetch bucket location. + if c.region != "" { + return c.region, nil + } + + if location, ok := c.bucketLocCache.Get(bucketName); ok { + return location, nil + } + + // Initialize a new request. + req, err := c.getBucketLocationRequest(ctx, bucketName) + if err != nil { + return "", err + } + + // Initiate the request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return "", err + } + location, err := processBucketLocationResponse(resp, bucketName) + if err != nil { + return "", err + } + c.bucketLocCache.Set(bucketName, location) + return location, nil +} + +// processes the getBucketLocation http response from the server. +func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) { + if resp != nil { + if resp.StatusCode != http.StatusOK { + err = httpRespToErrorResponse(resp, bucketName, "") + errResp := ToErrorResponse(err) + // For access denied error, it could be an anonymous + // request. 
Move forward and let the top level callers
+			// succeed if possible based on their policy.
+			switch errResp.Code {
+			case "NotImplemented":
+				if errResp.Server == "AmazonSnowball" {
+					return "snowball", nil
+				}
+			case "AuthorizationHeaderMalformed":
+				fallthrough
+			case "InvalidRegion":
+				fallthrough
+			case "AccessDenied":
+				if errResp.Region == "" {
+					return "us-east-1", nil
+				}
+				return errResp.Region, nil
+			}
+			return "", err
+		}
+	}
+
+	// Extract location.
+	var locationConstraint string
+	err = xmlDecoder(resp.Body, &locationConstraint)
+	if err != nil {
+		return "", err
+	}
+
+	location := locationConstraint
+	// An empty location defaults to 'us-east-1'.
+	if location == "" {
+		location = "us-east-1"
+	}
+
+	// A location of 'EU' maps to the meaningful 'eu-west-1'.
+	if location == "EU" {
+		location = "eu-west-1"
+	}
+
+	// The caller (getBucketLocation) is responsible for caching this value.
+	return location, nil
+}
+
+// getBucketLocationRequest - Wrapper creates a new getBucketLocation request.
+func (c Client) getBucketLocationRequest(ctx context.Context, bucketName string) (*http.Request, error) {
+	// Set location query.
+	urlValues := make(url.Values)
+	urlValues.Set("location", "")
+
+	// Get bucket location requests are always sent path style.
+	targetURL := *c.endpointURL
+
+	// Strip port 80/443 from the host, mirroring what the makeTargetURL
+	// method in api.go does.
+	if h, p, err := net.SplitHostPort(targetURL.Host); err == nil {
+		if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" {
+			targetURL.Host = h
+		}
+	}
+
+	isVirtualHost := s3utils.IsVirtualHostSupported(targetURL, bucketName)
+
+	var urlStr string
+
+	// Use virtual-host style only for Aliyun OSS; Amazon and Google
+	// endpoints stay path style for compatibility.
+	if isVirtualHost && s3utils.IsAliyunOSSEndpoint(targetURL) {
+		urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + targetURL.Host + "/?location"
+	} else {
+		targetURL.Path = path.Join(bucketName, "") + "/"
+		targetURL.RawQuery = urlValues.Encode()
+		urlStr = targetURL.String()
+	}
+
+	// Get a new HTTP request for the method.
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, urlStr, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set UserAgent for the request.
+	c.setUserAgent(req)
+
+	// Get credentials from the configured credentials provider.
+	value, err := c.credsProvider.Get()
+	if err != nil {
+		return nil, err
+	}
+
+	var (
+		signerType      = value.SignerType
+		accessKeyID     = value.AccessKeyID
+		secretAccessKey = value.SecretAccessKey
+		sessionToken    = value.SessionToken
+	)
+
+	// If a custom signer type is set, override the behavior.
+	if c.overrideSignerType != credentials.SignatureDefault {
+		signerType = c.overrideSignerType
+	}
+
+	// If the signerType returned by the credentials helper is anonymous,
+	// then do not sign regardless of the signerType override.
+	if value.SignerType == credentials.SignatureAnonymous {
+		signerType = credentials.SignatureAnonymous
+	}
+
+	if signerType.IsAnonymous() {
+		return req, nil
+	}
+
+	if signerType.IsV2() {
+		// Get bucket location calls should always be path style.
+		isVirtualHost := false
+		req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost)
+		return req, nil
+	}
+
+	// Set sha256 sum for signature calculation only with signature version '4'.
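+	// The empty-body hash is used over plain HTTP, while over TLS the payload
+	// hash may be skipped by sending the literal value UNSIGNED-PAYLOAD.
+	// Illustrative header values for the two cases:
+	//
+	//	X-Amz-Content-Sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855   (HTTP, empty body)
+	//	X-Amz-Content-Sha256: UNSIGNED-PAYLOAD                                                   (HTTPS)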
+ contentSha256 := emptySHA256Hex + if c.secure { + contentSha256 = unsignedPayload + } + + req.Header.Set("X-Amz-Content-Sha256", contentSha256) + req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") + return req, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/code_of_conduct.md b/vendor/github.com/minio/minio-go/v7/code_of_conduct.md new file mode 100644 index 00000000..cb232c3c --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/code_of_conduct.md @@ -0,0 +1,80 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior, in compliance with the +licensing terms applying to the Project developments. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. However, these actions shall respect the +licensing terms of the Project Developments that will always supersede such +Code of Conduct. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at dev@min.io. The project team +will review and investigate all complaints, and will respond in a way that it deems +appropriate to the circumstances. The project team is obligated to maintain +confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. 
+ +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +This version includes a clarification to ensure that the code of conduct is in +compliance with the free software licensing terms of the project. + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/minio/minio-go/v7/constants.go b/vendor/github.com/minio/minio-go/v7/constants.go new file mode 100644 index 00000000..2a2e6a0d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/constants.go @@ -0,0 +1,92 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +/// Multipart upload defaults. + +// absMinPartSize - absolute minimum part size (5 MiB) below which +// a part in a multipart upload may not be uploaded. +const absMinPartSize = 1024 * 1024 * 5 + +// minPartSize - minimum part size 16MiB per object after which +// putObject behaves internally as multipart. +const minPartSize = 1024 * 1024 * 16 + +// maxPartsCount - maximum number of parts for a single multipart session. +const maxPartsCount = 10000 + +// maxPartSize - maximum part size 5GiB for a single multipart upload +// operation. +const maxPartSize = 1024 * 1024 * 1024 * 5 + +// maxSinglePutObjectSize - maximum size 5GiB of object per PUT +// operation. +const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5 + +// maxMultipartPutObjectSize - maximum size 5TiB of object for +// Multipart operation. +const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 + +// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when +// we don't want to sign the request payload +const unsignedPayload = "UNSIGNED-PAYLOAD" + +// Total number of parallel workers used for multipart operation. +const totalWorkers = 4 + +// Signature related constants. +const ( + signV4Algorithm = "AWS4-HMAC-SHA256" + iso8601DateFormat = "20060102T150405Z" +) + +const ( + // Storage class header. 
+	amzStorageClass = "X-Amz-Storage-Class"
+
+	// Website redirect location header
+	amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location"
+
+	// Object Tagging headers
+	amzTaggingHeader          = "X-Amz-Tagging"
+	amzTaggingHeaderDirective = "X-Amz-Tagging-Directive"
+
+	amzVersionID         = "X-Amz-Version-Id"
+	amzTaggingCount      = "X-Amz-Tagging-Count"
+	amzExpiration        = "X-Amz-Expiration"
+	amzReplicationStatus = "X-Amz-Replication-Status"
+	amzDeleteMarker      = "X-Amz-Delete-Marker"
+
+	// Object legal hold header
+	amzLegalHoldHeader = "X-Amz-Object-Lock-Legal-Hold"
+
+	// Object retention headers
+	amzLockMode         = "X-Amz-Object-Lock-Mode"
+	amzLockRetainUntil  = "X-Amz-Object-Lock-Retain-Until-Date"
+	amzBypassGovernance = "X-Amz-Bypass-Governance-Retention"
+
+	// Replication status
+	amzBucketReplicationStatus = "X-Amz-Replication-Status"
+	// MinIO-specific replication/lifecycle transition extension
+	minIOBucketSourceMTime = "X-Minio-Source-Mtime"
+
+	minIOBucketSourceETag              = "X-Minio-Source-Etag"
+	minIOBucketReplicationDeleteMarker = "X-Minio-Source-DeleteMarker"
+	minIOBucketReplicationProxyRequest = "X-Minio-Source-Proxy-Request"
+	minIOBucketReplicationRequest      = "X-Minio-Source-Replication-Request"
+)
diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go
new file mode 100644
index 00000000..2bf4edf0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/core.go
@@ -0,0 +1,133 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"github.com/minio/minio-go/v7/pkg/encrypt"
+)
+
+// Core - Inherits Client and adds new methods to expose the low level S3 APIs.
+type Core struct {
+	*Client
+}
+
+// NewCore - Returns a new initialized Core client; it should only be used
+// under special conditions, such as needing access to lower-level primitives
+// in order to write your own wrappers.
+func NewCore(endpoint string, opts *Options) (*Core, error) {
+	var s3Client Core
+	client, err := New(endpoint, opts)
+	if err != nil {
+		return nil, err
+	}
+	s3Client.Client = client
+	return &s3Client, nil
+}
+
+// ListObjects - Lists all the objects at a prefix; optionally use marker and
+// delimiter to further filter the results.
+func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) {
+	return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys)
+}
+
+// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses
+// continuationToken instead of marker to support iteration over the results.
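+// A rough paging sketch on top of this method (bucket and prefix names are
+// illustrative):
+//
+//	token := ""
+//	for {
+//		res, err := core.ListObjectsV2("mybucket", "logs/", token, false, "", 1000)
+//		if err != nil {
+//			return err
+//		}
+//		for _, obj := range res.Contents {
+//			fmt.Println(obj.Key)
+//		}
+//		if !res.IsTruncated {
+//			break
+//		}
+//		token = res.NextContinuationToken
+//	}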
+func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) {
+	return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, fetchOwner, false, delimiter, maxkeys)
+}
+
+// CopyObject - copies an object from a source object to a destination object on the server side.
+func (c Core) CopyObject(ctx context.Context, sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions) (ObjectInfo, error) {
+	return c.copyObjectDo(ctx, sourceBucket, sourceObject, destBucket, destObject, metadata, srcOpts, dstOpts)
+}
+
+// CopyObjectPart - creates a part in a multipart upload by copying (a
+// part of) an existing object.
+func (c Core) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
+	partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) {
+	return c.copyObjectPartDo(ctx, srcBucket, srcObject, destBucket, destObject, uploadID,
+		partID, startOffset, length, metadata)
+}
+
+// PutObject - Uploads an object using a single PUT call.
+func (c Core) PutObject(ctx context.Context, bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, opts PutObjectOptions) (UploadInfo, error) {
+	hookReader := newHook(data, opts.Progress)
+	return c.putObjectDo(ctx, bucket, object, hookReader, md5Base64, sha256Hex, size, opts)
+}
+
+// NewMultipartUpload - Initiates a new multipart upload and returns the new uploadID.
+func (c Core) NewMultipartUpload(ctx context.Context, bucket, object string, opts PutObjectOptions) (uploadID string, err error) {
+	result, err := c.initiateMultipartUpload(ctx, bucket, object, opts)
+	return result.UploadID, err
+}
+
+// ListMultipartUploads - Lists incomplete uploads.
+func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) {
+	return c.listMultipartUploadsQuery(ctx, bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)
+}
+
+// PutObjectPart - Uploads an object part.
+func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) {
+	return c.uploadPart(ctx, bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse)
+}
+
+// ListObjectParts - Lists the uploaded parts of an incomplete upload.
+func (c Core) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) {
+	return c.listObjectPartsQuery(ctx, bucket, object, uploadID, partNumberMarker, maxParts)
+}
+
+// CompleteMultipartUpload - Concatenates uploaded parts and commits them to an object.
+func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart) (string, error) {
+	res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{
+		Parts: parts,
+	})
+	return res.ETag, err
+}
+
+// AbortMultipartUpload - Aborts an incomplete upload.
+func (c Core) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
+	return c.abortMultipartUpload(ctx, bucket, object, uploadID)
+}
+
+// GetBucketPolicy - fetches bucket access policy for a given bucket.
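+// The policy is returned as a raw JSON document; a minimal example of the
+// expected shape (illustrative only):
+//
+//	{
+//	  "Version": "2012-10-17",
+//	  "Statement": [{
+//	    "Effect": "Allow",
+//	    "Principal": {"AWS": ["*"]},
+//	    "Action": ["s3:GetObject"],
+//	    "Resource": ["arn:aws:s3:::mybucket/*"]
+//	  }]
+//	}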
+func (c Core) GetBucketPolicy(ctx context.Context, bucket string) (string, error) {
+	return c.getBucketPolicy(ctx, bucket)
+}
+
+// PutBucketPolicy - applies a new bucket access policy for a given bucket.
+func (c Core) PutBucketPolicy(ctx context.Context, bucket, bucketPolicy string) error {
+	return c.putBucketPolicy(ctx, bucket, bucketPolicy)
+}
+
+// GetObject is a lower level API implemented to support reading
+// partial objects and also downloading objects with special conditions
+// matching etag, modtime etc.
+func (c Core) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) {
+	return c.getObject(ctx, bucketName, objectName, opts)
+}
+
+// StatObject is a lower level API implemented to support special
+// conditions matching etag, modtime on a request.
+func (c Core) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
+	return c.statObject(ctx, bucketName, objectName, opts)
+}
diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go
new file mode 100644
index 00000000..ba7ff577
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go
@@ -0,0 +1,11502 @@
+// +build mint
+
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/dustin/go-humanize"
+	jsoniter "github.com/json-iterator/go"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+	"github.com/minio/minio-go/v7/pkg/encrypt"
+	"github.com/minio/minio-go/v7/pkg/notification"
+	"github.com/minio/minio-go/v7/pkg/tags"
+)
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
+const (
+	letterIdxBits = 6                    // 6 bits to represent a letter index
+	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
+	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
+)
+
+const (
+	serverEndpoint = "SERVER_ENDPOINT"
+	accessKey      = "ACCESS_KEY"
+	secretKey      = "SECRET_KEY"
+	enableHTTPS    = "ENABLE_HTTPS"
+	enableKMS      = "ENABLE_KMS"
+)
+
+func mustParseBool(str string) bool {
+	b, err := strconv.ParseBool(str)
+	if err != nil {
+		return false
+	}
+	return b
+}
+
+// readFull reads exactly len(buf) bytes from r into buf.
+func readFull(r io.Reader, buf []byte) (n int, err error) {
+	for n < len(buf) && err == nil {
+		var nn int
+		nn, err = r.Read(buf[n:])
+		// Some spurious io.Reader's return
+		// io.ErrUnexpectedEOF when nn == 0
+		// this behavior is undocumented
+		// must be treated as a workaround.
+		if nn == 0 && err == io.ErrUnexpectedEOF {
+			err = io.ErrShortBuffer
+		}
+		n += nn
+	}
+	if n >= len(buf) {
+		err = nil
+	} else if n > 0 && err == io.EOF {
+		err = io.ErrUnexpectedEOF
+	}
+	return
+}
+
+func cleanEmptyEntries(fields log.Fields) log.Fields {
+	cleanFields := log.Fields{}
+	for k, v := range fields {
+		if v != "" {
+			cleanFields[k] = v
+		}
+	}
+	return cleanFields
+}
+
+// log successful test runs
+func successLogger(testName string, function string, args map[string]interface{}, startTime time.Time) *log.Entry {
+	// calculate the test case duration
+	duration := time.Since(startTime)
+	// log with the fields as per mint
+	fields := log.Fields{"name": "minio-go: " + testName, "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": "PASS"}
+	return log.WithFields(cleanEmptyEntries(fields))
+}
+
+// A few features are currently not available in Gateway(s), so check whether
+// the err value is NotImplemented; in that case log the test as NA and let
+// execution continue. Otherwise log it as a failure and return.
+func logError(testName string, function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) {
+	// If the server returns NotImplemented we assume it is gateway mode and hence log it as info and move on to the next tests.
+	// Special case for the ComposeObject API as it is implemented on the client side and adds specific error details like `Error in upload-part-copy` in
+	// addition to the NotImplemented error returned from the server.
+	if isErrNotImplemented(err) {
+		ignoredLog(testName, function, args, startTime, message).Info()
+	} else {
+		failureLog(testName, function, args, startTime, alert, message, err).Fatal()
+	}
+}
+
+// log failed test runs
+func failureLog(testName string, function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) *log.Entry {
+	// calculate the test case duration
+	duration := time.Since(startTime)
+	var fields log.Fields
+	// log with the fields as per mint
+	if err != nil {
+		fields = log.Fields{"name": "minio-go: " + testName, "function": function, "args": args,
+			"duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, "error": err}
+	} else {
+		fields = log.Fields{"name": "minio-go: " + testName, "function": function, "args": args,
+			"duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message}
+	}
+	return log.WithFields(cleanEmptyEntries(fields))
+}
+
+// log not applicable test runs
+func ignoredLog(testName string, function string, args map[string]interface{}, startTime time.Time, alert string) *log.Entry {
+	// calculate the test case duration
+	duration := time.Since(startTime)
+	// log with the fields as per mint
+	fields := log.Fields{"name": "minio-go: " + testName, "function": function, "args": args,
+		"duration":
duration.Nanoseconds() / 1000000, "status": "NA", "alert": strings.Split(alert, " ")[0] + " is NotImplemented"} + return log.WithFields(cleanEmptyEntries(fields)) +} + +// Delete objects in given bucket, recursively +func cleanupBucket(bucketName string, c *minio.Client) error { + // Create a done channel to control 'ListObjectsV2' go routine. + doneCh := make(chan struct{}) + // Exit cleanly upon return. + defer close(doneCh) + // Iterate over all objects in the bucket via listObjectsV2 and delete + for objCh := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Recursive: true}) { + if objCh.Err != nil { + return objCh.Err + } + if objCh.Key != "" { + err := c.RemoveObject(context.Background(), bucketName, objCh.Key, minio.RemoveObjectOptions{}) + if err != nil { + return err + } + } + } + for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) { + if objPartInfo.Err != nil { + return objPartInfo.Err + } + if objPartInfo.Key != "" { + err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key) + if err != nil { + return err + } + } + } + // objects are already deleted, clear the buckets now + err := c.RemoveBucket(context.Background(), bucketName) + if err != nil { + return err + } + return err +} + +func cleanupVersionedBucket(bucketName string, c *minio.Client) error { + doneCh := make(chan struct{}) + defer close(doneCh) + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) { + if obj.Err != nil { + return obj.Err + } + if obj.Key != "" { + err := c.RemoveObject(context.Background(), bucketName, obj.Key, + minio.RemoveObjectOptions{VersionID: obj.VersionID, GovernanceBypass: true}) + if err != nil { + return err + } + } + } + for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) { + if objPartInfo.Err != nil { + return objPartInfo.Err + } + if objPartInfo.Key != "" { + err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key) + if err != nil { + return err + } + } + } + // objects are already deleted, clear the buckets now + err := c.RemoveBucket(context.Background(), bucketName) + if err != nil { + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) { + log.Println("found", obj.Key, obj.VersionID) + } + return err + } + return err +} + +func isErrNotImplemented(err error) bool { + return minio.ToErrorResponse(err).Code == "NotImplemented" +} + +func init() { + // If server endpoint is not set, all tests default to + // using https://play.min.io + if os.Getenv(serverEndpoint) == "" { + os.Setenv(serverEndpoint, "play.min.io") + os.Setenv(accessKey, "Q3AM3UQ867SPQQA43P2F") + os.Setenv(secretKey, "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG") + os.Setenv(enableHTTPS, "1") + } +} + +var mintDataDir = os.Getenv("MINT_DATA_DIR") + +func getMintDataDirFilePath(filename string) (fp string) { + if mintDataDir == "" { + return + } + return filepath.Join(mintDataDir, filename) +} + +func newRandomReader(seed, size int64) io.Reader { + return io.LimitReader(rand.New(rand.NewSource(seed)), size) +} + +func mustCrcReader(r io.Reader) uint32 { + crc := crc32.NewIEEE() + _, err := io.Copy(crc, r) + if err != nil { + panic(err) + } + return crc.Sum32() +} + +func crcMatches(r io.Reader, want uint32) error { + crc := crc32.NewIEEE() + _, err := io.Copy(crc, r) + if err != nil { + panic(err) + } + got := 
crc.Sum32() + if got != want { + return fmt.Errorf("crc mismatch, want %x, got %x", want, got) + } + return nil +} + +func crcMatchesName(r io.Reader, name string) error { + want := dataFileCRC32[name] + crc := crc32.NewIEEE() + _, err := io.Copy(crc, r) + if err != nil { + panic(err) + } + got := crc.Sum32() + if got != want { + return fmt.Errorf("crc mismatch, want %x, got %x", want, got) + } + return nil +} + +// read data from file if it exists or optionally create a buffer of particular size +func getDataReader(fileName string) io.ReadCloser { + if mintDataDir == "" { + size := int64(dataFileMap[fileName]) + if _, ok := dataFileCRC32[fileName]; !ok { + dataFileCRC32[fileName] = mustCrcReader(newRandomReader(size, size)) + } + return ioutil.NopCloser(newRandomReader(size, size)) + } + reader, _ := os.Open(getMintDataDirFilePath(fileName)) + if _, ok := dataFileCRC32[fileName]; !ok { + dataFileCRC32[fileName] = mustCrcReader(reader) + reader.Close() + reader, _ = os.Open(getMintDataDirFilePath(fileName)) + } + return reader +} + +// randString generates random names and prepends them with a known prefix. +func randString(n int, src rand.Source, prefix string) string { + b := make([]byte, n) + // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters! + for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; { + if remain == 0 { + cache, remain = src.Int63(), letterIdxMax + } + if idx := int(cache & letterIdxMask); idx < len(letterBytes) { + b[i] = letterBytes[idx] + i-- + } + cache >>= letterIdxBits + remain-- + } + return prefix + string(b[0:30-len(prefix)]) +} + +var dataFileMap = map[string]int{ + "datafile-0-b": 0, + "datafile-1-b": 1, + "datafile-1-kB": 1 * humanize.KiByte, + "datafile-10-kB": 10 * humanize.KiByte, + "datafile-33-kB": 33 * humanize.KiByte, + "datafile-100-kB": 100 * humanize.KiByte, + "datafile-1.03-MB": 1056 * humanize.KiByte, + "datafile-1-MB": 1 * humanize.MiByte, + "datafile-5-MB": 5 * humanize.MiByte, + "datafile-6-MB": 6 * humanize.MiByte, + "datafile-11-MB": 11 * humanize.MiByte, + "datafile-65-MB": 65 * humanize.MiByte, + "datafile-129-MB": 129 * humanize.MiByte, +} + +var dataFileCRC32 = map[string]uint32{} + +func isFullMode() bool { + return os.Getenv("MINT_MODE") == "full" +} + +func getFuncName() string { + return getFuncNameLoc(2) +} + +func getFuncNameLoc(caller int) string { + pc, _, _, _ := runtime.Caller(caller) + return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.") +} + +// Tests bucket re-create errors. +func testMakeBucketError() { + region := "eu-central-1" + + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + // initialize logging params + args := map[string]interface{}{ + "bucketName": "", + "region": region, + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. 
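+	// randString pads the known prefix out to 30 characters in total, so the
+	// result looks something like "minio-go-test-3k1aq0zzbmxtejaz" (example
+	// value only).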
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket in 'eu-central-1'. + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket Failed", err) + return + } + defer cleanupBucket(bucketName, c) + + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil { + logError(testName, function, args, startTime, "", "Bucket already exists", err) + return + } + // Verify valid error response from server. + if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && + minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { + logError(testName, function, args, startTime, "", "Invalid error returned by server", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testMetadataSizeLimit() { + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, objectSize, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts.UserMetadata": "", + } + rand.Seed(startTime.Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) + return + } + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + const HeaderSizeLimit = 8 * 1024 + const UserMetadataLimit = 2 * 1024 + + // Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail + metadata := make(map[string]string) + metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test"))) + args["metadata"] = fmt.Sprint(metadata) + + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) + if err == nil { + logError(testName, function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil) + return + } + + // Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail + metadata = make(map[string]string) + metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test"))) + args["metadata"] = fmt.Sprint(metadata) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) + if err == nil { + logError(testName, function, args, startTime, "", "Created object with headers exceeding header size limits", nil) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests various bucket supported formats. 
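+// In particular, a bucket name containing a '.' cannot be served in
+// virtual-host style over HTTPS, because the server's wildcard TLS
+// certificate only matches a single DNS label; such requests are therefore
+// staged as path style. Illustrative URLs:
+//
+//	virtual-host style: https://mybucket.s3.amazonaws.com/obj
+//	path style:         https://s3.amazonaws.com/my.bucket/obj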
+func testMakeBucketRegions() { + region := "eu-central-1" + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + // initialize logging params + args := map[string]interface{}{ + "bucketName": "", + "region": region, + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket in 'eu-central-1'. + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + // Make a new bucket with '.' in its name, in 'us-west-2'. This + // request is internally staged into a path style instead of + // virtual host style. + region = "us-west-2" + args["region"] = region + if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName+".withperiod", c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + +// Test PutObject using a large data to trigger multipart readat +func testPutObjectReadAt() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "objectContentType", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-129-MB"] + var reader = getDataReader("datafile-129-MB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Object content type + objectContentType := "binary/octet-stream" + args["objectContentType"] = objectContentType + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Get Object failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat Object failed", err) + return + } + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err) + return + } + if st.ContentType != objectContentType && st.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "Content types don't match", err) + return + } + if err := crcMatchesName(r, "datafile-129-MB"); err != nil { + logError(testName, function, args, startTime, "", "data CRC check failed", err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object is already closed, didn't return error on Close", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testListObjectVersions() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ListObjectVersions(bucketName, prefix, recursive)" + args := map[string]interface{}{ + "bucketName": "", + "prefix": "", + "recursive": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
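+	// Object locking must be requested at bucket creation time; on AWS S3 the
+	// ObjectLocking option below also implies versioning, while the explicit
+	// EnableVersioning call that follows makes the intent unambiguous for
+	// other S3-compatible servers.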
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + bufSize := dataFileMap["datafile-10-kB"] + var reader = getDataReader("datafile-10-kB") + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + bufSize = dataFileMap["datafile-1-b"] + reader = getDataReader("datafile-1-b") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Unexpected object deletion", err) + return + } + + var deleteMarkers, versions int + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + if info.Key != objectName { + logError(testName, function, args, startTime, "", "Unexpected object name in listing objects", nil) + return + } + if info.VersionID == "" { + logError(testName, function, args, startTime, "", "Unexpected version id in listing objects", nil) + return + } + if info.IsDeleteMarker { + deleteMarkers++ + if !info.IsLatest { + logError(testName, function, args, startTime, "", "Unexpected IsLatest field in listing objects", nil) + return + } + } else { + versions++ + } + } + + if deleteMarkers != 1 { + logError(testName, function, args, startTime, "", "Unexpected number of DeleteMarker elements in listing objects", nil) + return + } + + if versions != 2 { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testStatObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "StatObject" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. 
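+	// TraceOn dumps the signed HTTP requests and responses to the given
+	// writer, which helps when debugging signature or header mismatches; it
+	// is kept commented out below to keep test output readable.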
+ // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + bufSize := dataFileMap["datafile-10-kB"] + var reader = getDataReader("datafile-10-kB") + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + bufSize = dataFileMap["datafile-1-b"] + reader = getDataReader("datafile-1-b") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + if len(results) != 2 { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + for i := 0; i < len(results); i++ { + opts := minio.StatObjectOptions{VersionID: results[i].VersionID} + statInfo, err := c.StatObject(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "error during HEAD object", err) + return + } + if statInfo.VersionID == "" || statInfo.VersionID != results[i].VersionID { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected version id", err) + return + } + if statInfo.ETag != results[i].ETag { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) + return + } + if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) + return + } + if statInfo.Size != results[i].Size { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) + return + } + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testGetObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject()" + args := map[string]interface{}{} + + // Seed random 
based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Save the contents of datafiles to check with GetObject() reader output later + var buffers [][]byte + var testFiles = []string{"datafile-1-b", "datafile-10-kB"} + + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := ioutil.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + buffers = append(buffers, buf) + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + if len(results) != 2 { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + sort.SliceStable(results, func(i, j int) bool { + return results[i].Size < results[j].Size + }) + + sort.SliceStable(buffers, func(i, j int) bool { + return len(buffers[i]) < len(buffers[j]) + }) + + for i := 0; i < len(results); i++ { + opts := minio.GetObjectOptions{VersionID: results[i].VersionID} + reader, err := c.GetObject(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "error during GET object", err) + return + } + statInfo, err := reader.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err) + return + } + if statInfo.ETag != results[i].ETag { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) + return + } + if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) + return + } + if statInfo.Size != results[i].Size { + 
logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) + return + } + + tmpBuffer := bytes.NewBuffer([]byte{}) + _, err = io.Copy(tmpBuffer, reader) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected io.Copy()", err) + return + } + + if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) { + logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err) + return + } + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testCopyObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + var testFiles = []string{"datafile-1-b", "datafile-10-kB"} + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := ioutil.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var infos []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + infos = append(infos, info) + } + + sort.Slice(infos, func(i, j int) bool { + return infos[i].Size < infos[j].Size + }) + + reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err) + return + } + + oldestContent, err := 
ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) + return + } + + // Copy Source + srcOpts := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: infos[0].VersionID, + } + args["src"] = srcOpts + + dstOpts := minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName + "-copy", + } + args["dst"] = dstOpts + + // Perform the Copy + if _, err = c.CopyObject(context.Background(), dstOpts, srcOpts); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Destination object + readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer readerCopy.Close() + + newestContent, err := ioutil.ReadAll(readerCopy) + if err != nil { + logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) + return + } + + if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) { + logError(testName, function, args, startTime, "", "Unexpected destination object content", err) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testComposeObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
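+	// Note for the ComposeObject call further below: compose is implemented
+	// server side via multipart upload-part-copy, so every source except the
+	// last must be at least 5 MiB (absMinPartSize); that is why the 5-MB
+	// datafile is used alongside the small 10-kB one.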
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // var testFiles = []string{"datafile-5-MB", "datafile-10-kB"} + var testFiles = []string{"datafile-5-MB", "datafile-10-kB"} + var testFilesBytes [][]byte + + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := ioutil.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + testFilesBytes = append(testFilesBytes, buf) + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + sort.SliceStable(results, func(i, j int) bool { + return results[i].Size > results[j].Size + }) + + // Source objects to concatenate. We also specify decryption + // key for each + src1 := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: results[0].VersionID, + } + + src2 := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: results[1].VersionID, + } + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName + "-copy", + } + + _, err = c.ComposeObject(context.Background(), dst, src1, src2) + if err != nil { + logError(testName, function, args, startTime, "", "ComposeObject failed", err) + return + } + + // Destination object + readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of the copy object failed", err) + return + } + defer readerCopy.Close() + + copyContentBytes, err := ioutil.ReadAll(readerCopy) + if err != nil { + logError(testName, function, args, startTime, "", "Reading from the copy object reader failed", err) + return + } + + var expectedContent []byte + for _, fileBytes := range testFilesBytes { + expectedContent = append(expectedContent, fileBytes...) 
+
+func testRemoveObjectWithVersioning() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "DeleteObject()"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+
+	err = c.EnableVersioning(context.Background(), bucketName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+		return
+	}
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+	var version minio.ObjectInfo
+	for info := range objectsInfo {
+		if info.Err != nil {
+			logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
+			return
+		}
+		version = info
+		break
+	}
+
+	err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{VersionID: version.VersionID})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "DeleteObject failed", err)
+		return
+	}
+
+	objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+	for range objectsInfo {
+		logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any", err)
+		return
+	}
+
+	err = c.RemoveBucket(context.Background(), bucketName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
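+
+// Aside: deleting one specific version, as exercised above, only needs the
+// VersionID in RemoveObjectOptions; with an empty VersionID a versioned
+// bucket receives a delete marker instead. Illustrative sketch:
+//
+//	err := c.RemoveObject(context.Background(), bucket, object,
+//		minio.RemoveObjectOptions{VersionID: versionID})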
+
+func testRemoveObjectsWithVersioning() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "DeleteObjects()"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+
+	err = c.EnableVersioning(context.Background(), bucketName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+		return
+	}
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	objectsVersions := make(chan minio.ObjectInfo)
+	go func() {
+		// Always close the channel, otherwise RemoveObjects would block forever.
+		defer close(objectsVersions)
+		objectsVersionsInfo := c.ListObjects(context.Background(), bucketName,
+			minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+		for info := range objectsVersionsInfo {
+			if info.Err != nil {
+				logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
+				return
+			}
+			objectsVersions <- info
+		}
+	}()
+
+	removeErrors := c.RemoveObjects(context.Background(), bucketName, objectsVersions, minio.RemoveObjectsOptions{})
+	for e := range removeErrors {
+		if e.Err != nil {
+			logError(testName, function, args, startTime, "", "Single delete operation failed", e.Err)
+			return
+		}
+	}
+
+	objectsVersionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+	for range objectsVersionsInfo {
+		logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any", err)
+		return
+	}
+
+	err = c.RemoveBucket(context.Background(), bucketName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+func testObjectTaggingWithVersioning() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "{Get,Set,Remove}ObjectTagging()"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
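+	//
+	// Aside: the previous test showed the channel-feed pattern for bulk
+	// deletes. The producer goroutine must close the channel on every
+	// return path, otherwise RemoveObjects never finishes. Sketch:
+	//
+	//	toRemove := make(chan minio.ObjectInfo)
+	//	go func() {
+	//		defer close(toRemove)
+	//		for obj := range c.ListObjects(ctx, bucket, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) {
+	//			toRemove <- obj
+	//		}
+	//	}()
+	//	for e := range c.RemoveObjects(ctx, bucket, toRemove, minio.RemoveObjectsOptions{}) {
+	//		// each e.Err describes one failed deletion
+	//	}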
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + for _, file := range []string{"datafile-1-b", "datafile-10-kB"} { + _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader(file), int64(dataFileMap[file]), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + versionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var versions []minio.ObjectInfo + for info := range versionsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + versions = append(versions, info) + } + + sort.SliceStable(versions, func(i, j int) bool { + return versions[i].Size < versions[j].Size + }) + + tagsV1 := map[string]string{"key1": "val1"} + t1, err := tags.MapToObjectTags(tagsV1) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err) + return + } + + err = c.PutObjectTagging(context.Background(), bucketName, objectName, t1, minio.PutObjectTaggingOptions{VersionID: versions[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err) + return + } + + tagsV2 := map[string]string{"key2": "val2"} + t2, err := tags.MapToObjectTags(tagsV2) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err) + return + } + + err = c.PutObjectTagging(context.Background(), bucketName, objectName, t2, minio.PutObjectTaggingOptions{VersionID: versions[1].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (2) failed", err) + return + } + + tagsEqual := func(tags1, tags2 map[string]string) bool { + for k1, v1 := range tags1 { + v2, found := tags2[k1] + if found { + if v1 != v2 { + return false + } + } + } + return true + } + + gotTagsV1, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectTagging failed", err) + return + } + + if !tagsEqual(t1.ToMap(), gotTagsV1.ToMap()) { + logError(testName, function, args, startTime, "", "Unexpected tags content (1)", err) + return + } + + gotTagsV2, 
err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectTaggingContext failed", err) + return + } + + if !tagsEqual(t2.ToMap(), gotTagsV2.ToMap()) { + logError(testName, function, args, startTime, "", "Unexpected tags content (2)", err) + return + } + + err = c.RemoveObjectTagging(context.Background(), bucketName, objectName, minio.RemoveObjectTaggingOptions{VersionID: versions[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (2) failed", err) + return + } + + emptyTags, err := c.GetObjectTagging(context.Background(), bucketName, objectName, + minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectTagging failed", err) + return + } + + if len(emptyTags.ToMap()) != 0 { + logError(testName, function, args, startTime, "", "Unexpected tags content (2)", err) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test PutObject using a large data to trigger multipart readat +func testPutObjectWithMetadata() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", + } + + if !isFullMode() { + ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
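+	//
+	// Aside: tags are stored per version, as the preceding tagging test
+	// verified. An illustrative round trip (error handling elided, names
+	// are placeholders):
+	//
+	//	t, _ := tags.MapToObjectTags(map[string]string{"key1": "val1"})
+	//	_ = c.PutObjectTagging(ctx, bucket, object, t, minio.PutObjectTaggingOptions{VersionID: vid})
+	//	got, _ := c.GetObjectTagging(ctx, bucket, object, minio.GetObjectTaggingOptions{VersionID: vid})
+	//	_ = c.RemoveObjectTagging(ctx, bucket, object, minio.RemoveObjectTaggingOptions{VersionID: vid})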
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-129-MB"] + var reader = getDataReader("datafile-129-MB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Object custom metadata + customContentType := "custom/contenttype" + + args["metadata"] = map[string][]string{ + "Content-Type": {customContentType}, + "X-Amz-Meta-CustomKey": {"extra spaces in value"}, + } + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ + ContentType: customContentType}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) + return + } + if st.ContentType != customContentType && st.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err) + return + } + if err := crcMatchesName(r, "datafile-129-MB"); err != nil { + logError(testName, function, args, startTime, "", "data CRC check failed", err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testPutObjectWithContentLanguage() { + // initialize logging params + objectName := "test-object" + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": objectName, + "size": -1, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + // Make a new bucket. 
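+	//
+	// Aside: the test below only sets ContentLanguage, but headers set
+	// through PutObjectOptions are generally readable back the same way
+	// through StatObject. Illustrative sketch:
+	//
+	//	_, _ = c.PutObject(ctx, bucket, object, bytes.NewReader(nil), 0,
+	//		minio.PutObjectOptions{ContentLanguage: "en"})
+	//	st, _ := c.StatObject(ctx, bucket, object, minio.StatObjectOptions{})
+	//	lang := st.Metadata.Get("Content-Language") // "en"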
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + data := []byte{} + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{ + ContentLanguage: "en", + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + if objInfo.Metadata.Get("Content-Language") != "en" { + logError(testName, function, args, startTime, "", "Expected content-language 'en' doesn't match with StatObject return value", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test put object with streaming signature. +func testPutObjectStreaming() { + // initialize logging params + objectName := "test-object" + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size,opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": objectName, + "size": -1, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload an object. + sizes := []int64{0, 64*1024 - 1, 64 * 1024} + + for _, size := range sizes { + data := newRandomReader(size, size) + ui, err := c.PutObject(context.Background(), bucketName, objectName, data, int64(size), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) + return + } + + if ui.Size != size { + logError(testName, function, args, startTime, "", "PutObjectStreaming result has unexpected size", nil) + return + } + + objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if objInfo.Size != size { + logError(testName, function, args, startTime, "", "Unexpected size", err) + return + } + + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test get object seeker from the end, using whence set to '2'. 
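+// Whence '2' is io.SeekEnd, so a negative offset addresses bytes relative
+// to the end of the object. Illustrative sketch (r is the *minio.Object
+// returned by GetObject):
+//
+//	pos, err := r.Seek(-100, io.SeekEnd) // pos == size-100 when err == nil
+//	tail := make([]byte, 100)
+//	_, err = io.ReadFull(r, tail)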
+func testGetObjectSeekEnd() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) + return + } + + pos, err := r.Seek(-100, 2) + if err != nil { + logError(testName, function, args, startTime, "", "Object Seek failed", err) + return + } + if pos != st.Size-100 { + logError(testName, function, args, startTime, "", "Incorrect position", err) + return + } + buf2 := make([]byte, 100) + m, err := readFull(r, buf2) + if err != nil { + logError(testName, function, args, startTime, "", "Error reading through readFull", err) + return + } + if m != len(buf2) { + logError(testName, function, args, startTime, "", "Number of bytes dont match, expected "+string(len(buf2))+" got "+string(m), err) + return + } + hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:]) + hexBuf2 := fmt.Sprintf("%02x", buf2[:m]) + if hexBuf1 != hexBuf2 { + logError(testName, function, args, startTime, "", "Values at same index dont match", err) + return + } + pos, err = r.Seek(-100, 2) + if err != nil { + logError(testName, function, args, startTime, "", "Object Seek failed", err) + return + } + if pos != st.Size-100 { + logError(testName, function, args, startTime, "", "Incorrect position", 
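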
err)
+		return
+	}
+	if err = r.Close(); err != nil {
+		logError(testName, function, args, startTime, "", "ObjectClose failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test that a get object reader returns an error when it is closed twice.
+func testGetObjectClosedTwice() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Generate 33K of data.
+	bufSize := dataFileMap["datafile-33-kB"]
+	var reader = getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
+		return
+	}
+	if err := crcMatchesName(r, "datafile-33-kB"); err != nil {
+		logError(testName, function, args, startTime, "", "data CRC check failed", err)
+		return
+	}
+	if err := r.Close(); err != nil {
+		logError(testName, function, args, startTime, "", "Object Close failed", err)
+		return
+	}
+	if err := r.Close(); err == nil {
+		logError(testName, function, args, startTime, "", "Already closed object. No error returned", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test RemoveObjects request where context cancels after timeout
+func testRemoveObjectsContext() {
+	// Initialize logging params.
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "RemoveObjects(ctx, bucketName, objectsCh)"
+	args := map[string]interface{}{
+		"bucketName": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client.
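+	//
+	// Aside: the cancellation pattern this test exercises, in brief. A
+	// context that has already expired makes the client report an error on
+	// the result channel instead of deleting anything. Illustrative sketch:
+	//
+	//	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
+	//	defer cancel()
+	//	for e := range c.RemoveObjects(ctx, bucket, objectsCh, minio.RemoveObjectsOptions{}) {
+	//		// expect e.Err != nil here because the deadline already passed
+	//	}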
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + // Enable tracing, write to stdout. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate put data. + r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) + + // Multi remove of 20 objects. + nrObjects := 20 + objectsCh := make(chan minio.ObjectInfo) + go func() { + defer close(objectsCh) + for i := 0; i < nrObjects; i++ { + objectName := "sample" + strconv.Itoa(i) + ".txt" + info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, + minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + continue + } + objectsCh <- minio.ObjectInfo{ + Key: info.Key, + VersionID: info.VersionID, + } + } + }() + // Set context to cancel in 1 nanosecond. + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + // Call RemoveObjects API with short timeout. + errorCh := c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{}) + // Check for error. + select { + case r := <-errorCh: + if r.Err == nil { + logError(testName, function, args, startTime, "", "RemoveObjects should fail on short timeout", err) + return + } + } + // Set context with longer timeout. + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + args["ctx"] = ctx + defer cancel() + // Perform RemoveObjects with the longer timeout. Expect the removals to succeed. + errorCh = c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{}) + select { + case r, more := <-errorCh: + if more || r.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error", r.Err) + return + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test removing multiple objects with Remove API +func testRemoveMultipleObjects() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "RemoveObjects(bucketName, objectsCh)" + args := map[string]interface{}{ + "bucketName": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Enable tracing, write to stdout. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. 
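+	//
+	// Aside: with a healthy context the error channel simply closes once
+	// all deletes finish, so receiving with the ok flag distinguishes
+	// "done" from "failed". Illustrative sketch:
+	//
+	//	if e, ok := <-errorCh; ok {
+	//		// a real failure: inspect e.Err
+	//	} else {
+	//		// channel closed, every delete succeeded
+	//	}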
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) + + // Multi remove of 1100 objects + nrObjects := 200 + + objectsCh := make(chan minio.ObjectInfo) + + go func() { + defer close(objectsCh) + // Upload objects and send them to objectsCh + for i := 0; i < nrObjects; i++ { + objectName := "sample" + strconv.Itoa(i) + ".txt" + info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, + minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + continue + } + objectsCh <- minio.ObjectInfo{ + Key: info.Key, + VersionID: info.VersionID, + } + } + }() + + // Call RemoveObjects API + errorCh := c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) + + // Check if errorCh doesn't receive any error + select { + case r, more := <-errorCh: + if more { + logError(testName, function, args, startTime, "", "Unexpected error", r.Err) + return + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests FPutObject of a big file to trigger multipart +func testFPutObjectMultipart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObject(bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload. + var fileName = getMintDataDirFilePath("datafile-129-MB") + if fileName == "" { + // Make a temp file with minPartSize bytes of data. + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + // Upload 2 parts to utilize all 3 'workers' in multipart and still have a part to upload. 
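+	//
+	// Aside: FPutObject switches to multipart automatically once the file
+	// exceeds the part size, so the test only needs a large enough file.
+	// A caller may also influence part sizing explicitly; an illustrative
+	// sketch (path and sizes are placeholders):
+	//
+	//	_, err := c.FPutObject(ctx, bucket, object, "/tmp/big.bin",
+	//		minio.PutObjectOptions{PartSize: 64 * 1024 * 1024}) // 64 MiB parts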
+ if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File Close failed", err) + return + } + fileName = file.Name() + args["fileName"] = fileName + } + totalSize := dataFileMap["datafile-129-MB"] + // Set base object name + objectName := bucketName + "FPutObject" + "-standard" + args["objectName"] = objectName + + objectContentType := "testapplication/octet-stream" + args["objectContentType"] = objectContentType + + // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + objInfo, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Unexpected error", err) + return + } + if objInfo.Size != int64(totalSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err) + return + } + if objInfo.ContentType != objectContentType && objInfo.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "ContentType doesn't match", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests FPutObject with null contentType (default = application/octet-stream) +func testFPutObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObject(bucketName, objectName, fileName, opts)" + + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + location := "us-east-1" + + // Make a new bucket. + args["bucketName"] = bucketName + args["location"] = location + function = "MakeBucket(bucketName, location)" + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part. + // Use different data in part for multipart tests to check parts are uploaded in correct order. 
+ var fName = getMintDataDirFilePath("datafile-129-MB") + if fName == "" { + // Make a temp file with minPartSize bytes of data. + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + + // Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload. + if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + // Close the file pro-actively for windows. + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File close failed", err) + return + } + defer os.Remove(file.Name()) + fName = file.Name() + } + + // Set base object name + function = "FPutObject(bucketName, objectName, fileName, opts)" + objectName := bucketName + "FPutObject" + args["objectName"] = objectName + "-standard" + args["fileName"] = fName + args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"} + + // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) + ui, err := c.FPutObject(context.Background(), bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + + if ui.Size != int64(dataFileMap["datafile-129-MB"]) { + logError(testName, function, args, startTime, "", "FPutObject returned an unexpected upload size", err) + return + } + + // Perform FPutObject with no contentType provided (Expecting application/octet-stream) + args["objectName"] = objectName + "-Octet" + _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "File close failed", err) + return + } + + srcFile, err := os.Open(fName) + if err != nil { + logError(testName, function, args, startTime, "", "File open failed", err) + return + } + defer srcFile.Close() + // Add extension to temp file name + tmpFile, err := os.Create(fName + ".gtar") + if err != nil { + logError(testName, function, args, startTime, "", "File create failed", err) + return + } + _, err = io.Copy(tmpFile, srcFile) + if err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + tmpFile.Close() + + // Perform FPutObject with no contentType provided (Expecting application/x-gtar) + args["objectName"] = objectName + "-GTar" + args["opts"] = minio.PutObjectOptions{} + _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + + // Check headers + function = "StatObject(bucketName, objectName, opts)" + args["objectName"] = objectName + "-standard" + rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if rStandard.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err) + return + } + + function = "StatObject(bucketName, objectName, opts)" + 
args["objectName"] = objectName + "-Octet" + rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if rOctet.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rOctet.ContentType, err) + return + } + + function = "StatObject(bucketName, objectName, opts)" + args["objectName"] = objectName + "-GTar" + rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-gtar or application/octet-stream, got "+rGTar.ContentType, err) + return + } + + os.Remove(fName + ".gtar") + successLogger(testName, function, args, startTime).Info() +} + +// Tests FPutObject request when context cancels after timeout +func testFPutObjectContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObject(bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload 1 parts worth of data to use multipart upload. + // Use different data in part for multipart tests to check parts are uploaded in correct order. + var fName = getMintDataDirFilePath("datafile-1-MB") + if fName == "" { + // Make a temp file with 1 MiB bytes of data. + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + + // Upload 1 parts to trigger multipart upload + if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + // Close the file pro-actively for windows. 
+ if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File close failed", err) + return + } + defer os.Remove(file.Name()) + fName = file.Name() + } + + // Set base object name + objectName := bucketName + "FPutObjectContext" + args["objectName"] = objectName + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + // Perform FPutObject with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err == nil { + logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + // Perform FPutObject with a long timeout. Expect the put object to succeed + _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on long timeout", err) + return + } + + _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() + +} + +// Tests FPutObject request when context cancels after timeout +func testFPutObjectContextV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObjectContext(ctx, bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{ContentType:objectContentType}", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload 1 parts worth of data to use multipart upload. + // Use different data in part for multipart tests to check parts are uploaded in correct order. + var fName = getMintDataDirFilePath("datafile-1-MB") + if fName == "" { + // Make a temp file with 1 MiB bytes of data. 
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest") + if err != nil { + logError(testName, function, args, startTime, "", "Temp file creation failed", err) + return + } + + // Upload 1 parts to trigger multipart upload + if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + + // Close the file pro-actively for windows. + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File close failed", err) + return + } + defer os.Remove(file.Name()) + fName = file.Name() + } + + // Set base object name + objectName := bucketName + "FPutObjectContext" + args["objectName"] = objectName + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + // Perform FPutObject with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err == nil { + logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + // Perform FPutObject with a long timeout. Expect the put object to succeed + _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on longer timeout", err) + return + } + + _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() + +} + +// Test validates putObject with context to see if request cancellation is honored. +func testPutObjectContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(ctx, bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "opts": "", + } + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Make a new bucket. 
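+	//
+	// Aside: the pattern below mirrors the FPutObject context tests above:
+	// a deadline that has already expired must fail the upload, and a fresh
+	// context must let the same upload succeed. Illustrative sketch:
+	//
+	//	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
+	//	cancel()
+	//	if _, err := c.PutObject(ctx, bucket, object, r, size, opts); err == nil {
+	//		// unexpected: the cancelled context should have aborted the call
+	//	}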
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket call failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) + args["objectName"] = objectName + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + cancel() + args["ctx"] = ctx + args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"} + + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err == nil { + logError(testName, function, args, startTime, "", "PutObject should fail on short timeout", err) + return + } + + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + args["ctx"] = ctx + + defer cancel() + reader = getDataReader("datafile-33-kB") + defer reader.Close() + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() + +} + +// Tests get object ReaderSeeker interface methods. +func testGetObjectReadSeekFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + // Generate 33K of data. 
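+	//
+	// Aside: the comparison helper defined below follows a common pattern
+	// for validating ranged reads: seek, copy a bounded number of bytes,
+	// then compare against the reference buffer. Illustrative sketch:
+	//
+	//	_, _ = r.Seek(int64(start), io.SeekStart)
+	//	var got bytes.Buffer
+	//	_, _ = io.CopyN(&got, r, int64(end-start))
+	//	same := bytes.Equal(ref[start:end], got.Bytes())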
+ bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat object failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + // This following function helps us to compare data from the reader after seek + // with the data from the original buffer + cmpData := func(r io.Reader, start, end int) { + if end-start == 0 { + return + } + buffer := bytes.NewBuffer([]byte{}) + if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "CopyN failed", err) + return + } + } + if !bytes.Equal(buf[start:end], buffer.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + } + + // Generic seek error for errors other than io.EOF + seekErr := errors.New("seek error") + + testCases := []struct { + offset int64 + whence int + pos int64 + err error + shouldCmp bool + start int + end int + }{ + // Start from offset 0, fetch data and compare + {0, 0, 0, nil, true, 0, 0}, + // Start from offset 2048, fetch data and compare + {2048, 0, 2048, nil, true, 2048, bufSize}, + // Start from offset larger than possible + {int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0}, + // Move to offset 0 without comparing + {0, 0, 0, nil, false, 0, 0}, + // Move one step forward and compare + {1, 1, 1, nil, true, 1, bufSize}, + // Move larger than possible + {int64(bufSize), 1, 0, seekErr, false, 0, 0}, + // Provide negative offset with CUR_SEEK + {int64(-1), 1, 0, seekErr, false, 0, 0}, + // Test with whence SEEK_END and with positive offset + {1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0}, + // Test with whence SEEK_END and with negative offset + {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, + // Test with whence SEEK_END and with large negative offset + {-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0}, + } + + for i, testCase := range testCases { + // Perform seek operation + n, err := r.Seek(testCase.offset, testCase.whence) + // We expect an error + if testCase.err == seekErr && err == nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err) + return + } + // We expect a specific error + if testCase.err != seekErr && testCase.err != err { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: 
"+testCase.err.Error()+", found: "+err.Error(), err) + return + } + // If we expect an error go to the next loop + if testCase.err != nil { + continue + } + // Check the returned seek pos + if n != testCase.pos { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", number of bytes seeked does not match, expected "+string(testCase.pos)+", got "+string(n), err) + return + } + // Compare only if shouldCmp is activated + if testCase.shouldCmp { + cmpData(r, testCase.start, testCase.end) + } + } + successLogger(testName, function, args, startTime).Info() +} + +// Tests get object ReaderAt interface methods. +func testGetObjectReadAtFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + offset := int64(2048) + + // read directly + buf1 := make([]byte, 512) + buf2 := make([]byte, 512) + buf3 := make([]byte, 512) + buf4 := make([]byte, 512) + + // Test readAt before stat is called such that objectInfo doesn't change. 
+	m, err := r.ReadAt(buf1, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf1) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf1), m), err)
+		return
+	}
+	if !bytes.Equal(buf1, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err)
+		return
+	}
+
+	m, err = r.ReadAt(buf2, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf2) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf2), m), err)
+		return
+	}
+	if !bytes.Equal(buf2, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+
+	offset += 512
+	m, err = r.ReadAt(buf3, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf3) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf3), m), err)
+		return
+	}
+	if !bytes.Equal(buf3, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+	m, err = r.ReadAt(buf4, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf4) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf4), m), err)
+		return
+	}
+	if !bytes.Equal(buf4, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+
+	buf5 := make([]byte, len(buf))
+	// Read the whole object.
+	m, err = r.ReadAt(buf5, 0)
+	if err != nil {
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "ReadAt failed", err)
+			return
+		}
+	}
+	if m != len(buf5) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf5), m), err)
+		return
+	}
+	if !bytes.Equal(buf, buf5) {
+		logError(testName, function, args, startTime, "", "Data read in GetObject does not match what was previously uploaded", err)
+		return
+	}
+
+	buf6 := make([]byte, len(buf)+1)
+	// Read the whole object and beyond.
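+	// The same contract covers oversized buffers (a sketch with placeholder
+	// names demo and over): every available byte is delivered and io.EOF is
+	// reported alongside the count, which is why only non-EOF errors fail the
+	// read below.
+	{
+		demo := bytes.NewReader(buf)
+		over := make([]byte, len(buf)+1)
+		n, err := demo.ReadAt(over, 0)
+		_, _ = n, err // n == len(buf), err == io.EOF
+	}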
+	_, err = r.ReadAt(buf6, 0)
+	if err != nil {
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "ReadAt failed", err)
+			return
+		}
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Reproduces issue https://github.com/minio/minio-go/issues/1137
+func testGetObjectReadAtWhenEOFWasReached() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Generate 33K of data.
+	bufSize := dataFileMap["datafile-33-kB"]
+	var reader = getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	// read directly
+	buf1 := make([]byte, len(buf))
+	buf2 := make([]byte, 512)
+
+	m, err := r.Read(buf1)
+	if err != nil {
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "Read failed", err)
+			return
+		}
+	}
+	if m != len(buf1) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Read read shorter bytes before reaching EOF, expected %d, got %d", len(buf1), m), err)
+		return
+	}
+	if !bytes.Equal(buf1, buf) {
+		logError(testName, function, args, startTime, "", "Read data does not match the uploaded data", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err)
+		return
+	}
+
+	m, err = r.ReadAt(buf2, 512)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf2) {
+		logError(testName, function, args,
startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) + return + } + if !bytes.Equal(buf2, buf[512:1024]) { + logError(testName, function, args, startTime, "", "Incorrect count of ReadAt data", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test Presigned Post Policy +func testPresignedPostPolicy() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PresignedPostPolicy(policy)" + args := map[string]interface{}{ + "policy": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + // Azure requires the key to not start with a number + metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user") + metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + policy := minio.NewPostPolicy() + + if err := policy.SetBucket(""); err == nil { + logError(testName, function, args, startTime, "", "SetBucket did not fail for invalid conditions", err) + return + } + if err := policy.SetKey(""); err == nil { + logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err) + return + } + if err := policy.SetKeyStartsWith(""); err == nil { + logError(testName, function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err) + return + } + if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil { + logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err) + return + } + if err := policy.SetContentType(""); err == nil { + logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err) + return + } + if err := policy.SetContentTypeStartsWith(""); err == nil { + logError(testName, function, args, startTime, "", "SetContentTypeStartsWith did not fail for invalid conditions", 
err)
+		return
+	}
+	if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil {
+		logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err)
+		return
+	}
+	if err := policy.SetUserMetadata("", ""); err == nil {
+		logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err)
+		return
+	}
+
+	policy.SetBucket(bucketName)
+	policy.SetKey(objectName)
+	policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
+	policy.SetContentType("binary/octet-stream")
+	policy.SetContentLengthRange(10, 1024*1024)
+	policy.SetUserMetadata(metadataKey, metadataValue)
+	args["policy"] = policy.String()
+
+	presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err)
+		return
+	}
+
+	var formBuf bytes.Buffer
+	writer := multipart.NewWriter(&formBuf)
+	for k, v := range formData {
+		writer.WriteField(k, v)
+	}
+
+	// Get a 33KB file to upload and test if the set post policy works
+	var filePath = getMintDataDirFilePath("datafile-33-kB")
+	if filePath == "" {
+		// Make a temp file with 33 KB data.
+		file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest")
+		if err != nil {
+			logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+			return
+		}
+		if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil {
+			logError(testName, function, args, startTime, "", "Copy failed", err)
+			return
+		}
+		if err = file.Close(); err != nil {
+			logError(testName, function, args, startTime, "", "File Close failed", err)
+			return
+		}
+		filePath = file.Name()
+	}
+
+	// add file to post request
+	f, err := os.Open(filePath)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "File open failed", err)
+		return
+	}
+	// Close is deferred only after the error check so a nil *os.File is never closed.
+	defer f.Close()
+	w, err := writer.CreateFormFile("file", filePath)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "CreateFormFile failed", err)
+		return
+	}
+
+	_, err = io.Copy(w, f)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Copy failed", err)
+		return
+	}
+	writer.Close()
+
+	transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
+	if err != nil {
+		logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
+		return
+	}
+
+	httpClient := &http.Client{
+		// Setting a sensible timeout of 30secs to wait for response
+		// headers. Request is proactively canceled after 30secs
+		// with no response.
+ Timeout: 30 * time.Second, + Transport: transport, + } + + req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes())) + if err != nil { + logError(testName, function, args, startTime, "", "Http request failed", err) + return + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + // make post request with correct form data + res, err := httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "Http request failed", err) + return + } + defer res.Body.Close() + if res.StatusCode != http.StatusNoContent { + logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status)) + return + } + + // expected path should be absolute path of the object + var scheme string + if mustParseBool(os.Getenv(enableHTTPS)) { + scheme = "https://" + } else { + scheme = "http://" + } + + expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName + expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName + + if val, ok := res.Header["Location"]; ok { + if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS { + logError(testName, function, args, startTime, "", "Location in header response is incorrect", err) + return + } + } else { + logError(testName, function, args, startTime, "", "Location not found in header response", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests copy object +func testCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(dst, src)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Make a new bucket in 'us-east-1' (destination bucket). + err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName+"-copy", c) + + // Generate 33K of data. 
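+	// The CopySrcOptions fields set further below map onto the S3 copy-source
+	// preconditions (a summary, assuming standard S3 semantics):
+	//   MatchETag            -> x-amz-copy-source-if-match
+	//   NoMatchETag          -> x-amz-copy-source-if-none-match
+	//   MatchModifiedSince   -> x-amz-copy-source-if-modified-since
+	//   MatchUnmodifiedSince -> x-amz-copy-source-if-unmodified-since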
+ bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + // Check the various fields of source object against destination object. + objInfo, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + // Copy Source + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + // Set copy conditions. + MatchETag: objInfo.ETag, + MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + } + args["src"] = src + + dst := minio.CopyDestOptions{ + Bucket: bucketName + "-copy", + Object: objectName + "-copy", + } + + // Perform the Copy + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Source object + r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + // Destination object + readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + // Check the various fields of source object against destination object. + objInfo, err = r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + objInfoCopy, err := readerCopy.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + if objInfo.Size != objInfoCopy.Size { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+", got "+string(objInfo.Size), err) + return + } + + if err := crcMatchesName(r, "datafile-33-kB"); err != nil { + logError(testName, function, args, startTime, "", "data CRC check failed", err) + return + } + if err := crcMatchesName(readerCopy, "datafile-33-kB"); err != nil { + logError(testName, function, args, startTime, "", "copy data CRC check failed", err) + return + } + // Close all the get readers before proceeding with CopyObject operations. 
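+	// Assuming standard S3 semantics, a copy whose preconditions cannot be met
+	// is answered with 412 Precondition Failed and surfaces as a non-nil error
+	// from CopyObject; the deliberately unsatisfiable conditions below rely on
+	// that. In outline (values are placeholders):
+	//   src := minio.CopySrcOptions{Bucket: bucket, Object: object, NoMatchETag: etag}
+	//   _, err := c.CopyObject(ctx, dst, src) // err != nil while the source still has etag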
+ r.Close() + readerCopy.Close() + + // CopyObject again but with wrong conditions + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + NoMatchETag: objInfo.ETag, + } + + // Perform the Copy which should fail + _, err = c.CopyObject(context.Background(), dst, src) + if err == nil { + logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) + return + } + + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + } + + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName, + ReplaceMetadata: true, + UserMetadata: map[string]string{ + "Copy": "should be same", + }, + } + args["dst"] = dst + args["src"] = src + + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject shouldn't fail", err) + return + } + + oi, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + stOpts := minio.StatObjectOptions{} + stOpts.SetMatchETag(oi.ETag) + objInfo, err = c.StatObject(context.Background(), bucketName, objectName, stOpts) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject ETag should match and not fail", err) + return + } + + if objInfo.Metadata.Get("x-amz-meta-copy") != "should be same" { + logError(testName, function, args, startTime, "", "CopyObject modified metadata should match", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests SSE-C get object ReaderSeeker interface methods. +func testSSECEncryptedGetObjectReadSeekFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + // Generate 129MiB of data. 
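+	// SSE-C sketch: the same client-held key must accompany both the upload
+	// and the download; the test below derives it with encrypt.DefaultPBKDF
+	// from a password and a bucket+object salt. In outline (ctx and password
+	// are placeholders):
+	//   sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
+	//   c.PutObject(ctx, bucket, object, body, size, minio.PutObjectOptions{ServerSideEncryption: sse})
+	//   c.GetObject(ctx, bucket, object, minio.GetObjectOptions{ServerSideEncryption: sse})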
+ bufSize := dataFileMap["datafile-129-MB"] + var reader = getDataReader("datafile-129-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer r.Close() + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat object failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + // This following function helps us to compare data from the reader after seek + // with the data from the original buffer + cmpData := func(r io.Reader, start, end int) { + if end-start == 0 { + return + } + buffer := bytes.NewBuffer([]byte{}) + if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "CopyN failed", err) + return + } + } + if !bytes.Equal(buf[start:end], buffer.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + } + + testCases := []struct { + offset int64 + whence int + pos int64 + err error + shouldCmp bool + start int + end int + }{ + // Start from offset 0, fetch data and compare + {0, 0, 0, nil, true, 0, 0}, + // Start from offset 2048, fetch data and compare + {2048, 0, 2048, nil, true, 2048, bufSize}, + // Start from offset larger than possible + {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, + // Move to offset 0 without comparing + {0, 0, 0, nil, false, 0, 0}, + // Move one step forward and compare + {1, 1, 1, nil, true, 1, bufSize}, + // Move larger than possible + {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, + // Provide negative offset with CUR_SEEK + {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, + // Test with whence SEEK_END and with positive offset + {1024, 2, 0, io.EOF, false, 0, 0}, + // Test with whence SEEK_END and with negative offset + {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, + // Test with whence SEEK_END and with large negative offset + {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, + // Test with invalid whence + {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, + } + + for i, testCase := range testCases { + // Perform seek operation + n, err := r.Seek(testCase.offset, testCase.whence) + if err != nil && testCase.err == nil { + // We expected success. 
+ logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err == nil && testCase.err != nil { + // We expected failure, but got success. + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err != nil && testCase.err != nil { + if err.Error() != testCase.err.Error() { + // We expect a specific error + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + } + // Check the returned seek pos + if n != testCase.pos { + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) + return + } + // Compare only if shouldCmp is activated + if testCase.shouldCmp { + cmpData(r, testCase.start, testCase.end) + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests SSE-S3 get object ReaderSeeker interface methods. +func testSSES3EncryptedGetObjectReadSeekFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + // Generate 129MiB of data. 
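+	// By contrast with the SSE-C variant above, SSE-S3 (encrypt.NewSSE())
+	// leaves key management to the server: only the upload below carries an
+	// encryption option, and the object is read back with a plain
+	// minio.GetObjectOptions{}. In outline (ctx is a placeholder):
+	//   c.PutObject(ctx, bucket, object, body, size, minio.PutObjectOptions{ServerSideEncryption: encrypt.NewSSE()})
+	//   c.GetObject(ctx, bucket, object, minio.GetObjectOptions{})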
+ bufSize := dataFileMap["datafile-129-MB"] + var reader = getDataReader("datafile-129-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.NewSSE(), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer r.Close() + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat object failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + // This following function helps us to compare data from the reader after seek + // with the data from the original buffer + cmpData := func(r io.Reader, start, end int) { + if end-start == 0 { + return + } + buffer := bytes.NewBuffer([]byte{}) + if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "CopyN failed", err) + return + } + } + if !bytes.Equal(buf[start:end], buffer.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + } + + testCases := []struct { + offset int64 + whence int + pos int64 + err error + shouldCmp bool + start int + end int + }{ + // Start from offset 0, fetch data and compare + {0, 0, 0, nil, true, 0, 0}, + // Start from offset 2048, fetch data and compare + {2048, 0, 2048, nil, true, 2048, bufSize}, + // Start from offset larger than possible + {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, + // Move to offset 0 without comparing + {0, 0, 0, nil, false, 0, 0}, + // Move one step forward and compare + {1, 1, 1, nil, true, 1, bufSize}, + // Move larger than possible + {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, + // Provide negative offset with CUR_SEEK + {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, + // Test with whence SEEK_END and with positive offset + {1024, 2, 0, io.EOF, false, 0, 0}, + // Test with whence SEEK_END and with negative offset + {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, + // Test with whence SEEK_END and with large negative offset + {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, + // Test with invalid whence + {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, + } + + for i, testCase := range testCases { + // Perform seek operation + n, err := r.Seek(testCase.offset, testCase.whence) + if err != nil && testCase.err == nil { + // We expected success. 
+ logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err == nil && testCase.err != nil { + // We expected failure, but got success. + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err != nil && testCase.err != nil { + if err.Error() != testCase.err.Error() { + // We expect a specific error + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + } + // Check the returned seek pos + if n != testCase.pos { + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) + return + } + // Compare only if shouldCmp is activated + if testCase.shouldCmp { + cmpData(r, testCase.start, testCase.end) + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests SSE-C get object ReaderAt interface methods. +func testSSECEncryptedGetObjectReadAtFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 129MiB of data. 
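+	// Reference for the whence columns in the seek tables of the tests above:
+	// 0, 1 and 2 are io.SeekStart, io.SeekCurrent and io.SeekEnd, e.g.:
+	//   r.Seek(-1024, io.SeekEnd) // same as {-1024, 2, ...}: position bufSize-1024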
+	bufSize := dataFileMap["datafile-129-MB"]
+	var reader = getDataReader("datafile-129-MB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
+		ContentType:          "binary/octet-stream",
+		ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
+	})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{
+		ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
+	})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	defer r.Close()
+
+	offset := int64(2048)
+
+	// read directly
+	buf1 := make([]byte, 512)
+	buf2 := make([]byte, 512)
+	buf3 := make([]byte, 512)
+	buf4 := make([]byte, 512)
+
+	// Test readAt before stat is called such that objectInfo doesn't change.
+	m, err := r.ReadAt(buf1, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf1) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf1), m), err)
+		return
+	}
+	if !bytes.Equal(buf1, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err)
+		return
+	}
+
+	m, err = r.ReadAt(buf2, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf2) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf2), m), err)
+		return
+	}
+	if !bytes.Equal(buf2, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+	m, err = r.ReadAt(buf3, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf3) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf3), m), err)
+		return
+	}
+	if !bytes.Equal(buf3, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+	m, err = r.ReadAt(buf4, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf4) {
+		logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, 
expected "+string(len(buf4))+", got "+string(m), err) + return + } + if !bytes.Equal(buf4, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + + buf5 := make([]byte, len(buf)) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + if m != len(buf5) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) + return + } + if !bytes.Equal(buf, buf5) { + logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) + return + } + + buf6 := make([]byte, len(buf)+1) + // Read the whole object and beyond. + _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests SSE-S3 get object ReaderAt interface methods. +func testSSES3EncryptedGetObjectReadAtFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 129MiB of data. 
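+	// Note on ordering: GetObject returns a lazy reader, so the first ReadAt
+	// below is what actually opens the stream; issuing it before Stat checks
+	// that the object info reported afterwards stays consistent (a reading of
+	// the test's own comment, not a documented API guarantee).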
+	bufSize := dataFileMap["datafile-129-MB"]
+	var reader = getDataReader("datafile-129-MB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
+		ContentType:          "binary/octet-stream",
+		ServerSideEncryption: encrypt.NewSSE(),
+	})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	defer r.Close()
+
+	offset := int64(2048)
+
+	// read directly
+	buf1 := make([]byte, 512)
+	buf2 := make([]byte, 512)
+	buf3 := make([]byte, 512)
+	buf4 := make([]byte, 512)
+
+	// Test readAt before stat is called such that objectInfo doesn't change.
+	m, err := r.ReadAt(buf1, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf1) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf1), m), err)
+		return
+	}
+	if !bytes.Equal(buf1, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err)
+		return
+	}
+
+	m, err = r.ReadAt(buf2, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf2) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf2), m), err)
+		return
+	}
+	if !bytes.Equal(buf2, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+	m, err = r.ReadAt(buf3, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf3) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf3), m), err)
+		return
+	}
+	if !bytes.Equal(buf3, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+	m, err = r.ReadAt(buf4, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf4) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf4), m), err)
+		return
+	}
+	if !bytes.Equal(buf4, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two 
ReadAt from same offset", err) + return + } + + buf5 := make([]byte, len(buf)) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + if m != len(buf5) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) + return + } + if !bytes.Equal(buf, buf5) { + logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) + return + } + + buf6 := make([]byte, len(buf)+1) + // Read the whole object and beyond. + _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// testSSECEncryptionPutGet tests encryption with customer provided encryption keys +func testSSECEncryptionPutGet() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutEncryptedObject(bucketName, objectName, reader, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "sse": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
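+	// The size table below deliberately straddles the 16-byte AES block
+	// boundary (15/16/17 and 31/32/33 bytes) before jumping to KiB- and
+	// MiB-sized payloads, so both partial-block and whole-block encrypted
+	// lengths are exercised (an observation on the chosen sizes, as shown
+	// below).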
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + const password = "correct horse battery staple" // https://xkcd.com/936/ + + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + args["sse"] = sse + + // Put encrypted data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) + return + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) + return + } + if recvBuffer.Len() != len(testCase.buf) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) + return + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return + } + + successLogger(testName, function, args, startTime).Info() + + } + + successLogger(testName, function, args, startTime).Info() +} + +// TestEncryptionFPut tests encryption with customer specified encryption keys +func testSSECEncryptionFPut() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "filePath": "", + "contentType": "", + "sse": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. 
+ // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Object custom metadata + customContentType := "custom/contenttype" + args["metadata"] = customContentType + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 0)}, + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + const password = "correct horse battery staple" // https://xkcd.com/936/ + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + args["sse"] = sse + + // Generate a random file name. + fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) + if err != nil { + logError(testName, function, args, startTime, "", "file create failed", err) + return + } + _, err = file.Write(testCase.buf) + if err != nil { + logError(testName, function, args, startTime, "", "file write failed", err) + return + } + file.Close() + // Put encrypted data + if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { + logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) + return + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) + return + } + if recvBuffer.Len() != len(testCase.buf) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) + return + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return + } + + os.Remove(fileName) + } + + successLogger(testName, function, args, startTime).Info() +} + +// testSSES3EncryptionPutGet tests SSE-S3 encryption +func testSSES3EncryptionPutGet() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + 
function := "PutEncryptedObject(bucketName, objectName, reader, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "sse": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.NewSSE() + args["sse"] = sse + + // Put encrypted data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) + return + } + + // Read the data back without any encryption headers + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) + return + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) + return + } + if recvBuffer.Len() != len(testCase.buf) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) + return + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return + } + + successLogger(testName, function, args, startTime).Info() + + } + + successLogger(testName, function, args, startTime).Info() +} + +// TestSSES3EncryptionFPut tests server side encryption +func testSSES3EncryptionFPut() { + // initialize logging params + startTime 
:= time.Now() + testName := getFuncName() + function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "filePath": "", + "contentType": "", + "sse": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Object custom metadata + customContentType := "custom/contenttype" + args["metadata"] = customContentType + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 0)}, + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.NewSSE() + args["sse"] = sse + + // Generate a random file name. 
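+	// Per-iteration file lifecycle, in outline (the loop writes a scratch
+	// file, uploads it with FPutObject, then removes it; ctx is a placeholder):
+	//   file, _ := os.Create(fileName)
+	//   file.Write(testCase.buf)
+	//   file.Close()
+	//   c.FPutObject(ctx, bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse})
+	//   os.Remove(fileName)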
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) + if err != nil { + logError(testName, function, args, startTime, "", "file create failed", err) + return + } + _, err = file.Write(testCase.buf) + if err != nil { + logError(testName, function, args, startTime, "", "file write failed", err) + return + } + file.Close() + // Put encrypted data + if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { + logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) + return + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + logError(testName, function, args, startTime, "", "Test "+fmt.Sprintf("%d", i+1)+", error: "+err.Error(), err) + return + } + if recvBuffer.Len() != len(testCase.buf) { + logError(testName, function, args, startTime, "", "Test "+fmt.Sprintf("%d", i+1)+", Number of bytes of received object does not match, expected "+fmt.Sprintf("%d", len(testCase.buf))+", got "+fmt.Sprintf("%d", recvBuffer.Len()), err) + return + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + logError(testName, function, args, startTime, "", "Test "+fmt.Sprintf("%d", i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return + } + + os.Remove(fileName) + } + + successLogger(testName, function, args, startTime).Info() +} + +func testBucketNotification() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "SetBucketNotification(bucketName)" + args := map[string]interface{}{ + "bucketName": "", + } + + if os.Getenv("NOTIFY_BUCKET") == "" || + os.Getenv("NOTIFY_SERVICE") == "" || + os.Getenv("NOTIFY_REGION") == "" || + os.Getenv("NOTIFY_ACCOUNTID") == "" || + os.Getenv("NOTIFY_RESOURCE") == "" { + ignoredLog(testName, function, args, startTime, "Skipped notification test as it is not configured").Info() + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable to debug + // c.TraceOn(os.Stderr) + + // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + bucketName := os.Getenv("NOTIFY_BUCKET") + args["bucketName"] = bucketName + + topicArn := notification.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE")) + queueArn := notification.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource") + + topicConfig := notification.NewConfig(topicArn) + topicConfig.AddEvents(notification.ObjectCreatedAll, notification.ObjectRemovedAll) + topicConfig.AddFilterSuffix("jpg") + + queueConfig := notification.NewConfig(queueArn) + queueConfig.AddEvents(notification.ObjectCreatedAll) + queueConfig.AddFilterPrefix("photos/") + + config := notification.Configuration{} + config.AddTopic(topicConfig) + + // Add the same topicConfig again, should have no effect + // because it is duplicated + config.AddTopic(topicConfig) + if len(config.TopicConfigs) != 1 { + logError(testName, function, args, startTime, "", "Duplicate entry added", err) + return + } + + // Add and remove a queue config + config.AddQueue(queueConfig) + config.RemoveQueueByArn(queueArn) + + err = c.SetBucketNotification(context.Background(), bucketName, config) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketNotification failed", err) + return + } + + config, err = c.GetBucketNotification(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketNotification failed", err) + return + } + + if len(config.TopicConfigs) != 1 { + logError(testName, function, args, startTime, "", "Topic config is empty", err) + return + } + + if config.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" { + logError(testName, function, args, startTime, "", "Couldn't get the suffix", err) + return + } + + err = c.RemoveAllBucketNotification(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "RemoveAllBucketNotification failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests comprehensive list of all methods. +func testFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "testFunctional()" + functionAll := "" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable to debug + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket. 
+ function = "MakeBucket(bucketName, region)" + functionAll = "MakeBucket(bucketName, region)" + args["bucketName"] = bucketName + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + + defer cleanupBucket(bucketName, c) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate a random file name. + fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) + if err != nil { + logError(testName, function, args, startTime, "", "File creation failed", err) + return + } + for i := 0; i < 3; i++ { + buf := make([]byte, rand.Intn(1<<19)) + _, err = file.Write(buf) + if err != nil { + logError(testName, function, args, startTime, "", "File write failed", err) + return + } + } + file.Close() + + // Verify if bucket exits and you have access. + var exists bool + function = "BucketExists(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + exists, err = c.BucketExists(context.Background(), bucketName) + + if err != nil { + logError(testName, function, args, startTime, "", "BucketExists failed", err) + return + } + if !exists { + logError(testName, function, args, startTime, "", "Could not find the bucket", err) + return + } + + // Asserting the default bucket policy. + function = "GetBucketPolicy(ctx, bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + nilPolicy, err := c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + if nilPolicy != "" { + logError(testName, function, args, startTime, "", "policy should be set to nil", err) + return + } + + // Set the bucket policy to 'public readonly'. + function = "SetBucketPolicy(bucketName, readOnlyPolicy)" + functionAll += ", " + function + + readOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": readOnlyPolicy, + } + + err = c.SetBucketPolicy(context.Background(), bucketName, readOnlyPolicy) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + // should return policy `readonly`. + function = "GetBucketPolicy(ctx, bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + _, err = c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + + // Make the bucket 'public writeonly'. + function = "SetBucketPolicy(bucketName, writeOnlyPolicy)" + functionAll += ", " + function + + writeOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": writeOnlyPolicy, + } + err = c.SetBucketPolicy(context.Background(), bucketName, writeOnlyPolicy) + + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + // should return policy `writeonly`. 
+ function = "GetBucketPolicy(ctx, bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + + _, err = c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + + // Make the bucket 'public read/write'. + function = "SetBucketPolicy(bucketName, readWritePolicy)" + functionAll += ", " + function + + readWritePolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` + + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": readWritePolicy, + } + err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy) + + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + // should return policy `readwrite`. + function = "GetBucketPolicy(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + _, err = c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + + // List all buckets. + function = "ListBuckets()" + functionAll += ", " + function + args = nil + buckets, err := c.ListBuckets(context.Background()) + + if len(buckets) == 0 { + logError(testName, function, args, startTime, "", "Found bucket list to be empty", err) + return + } + if err != nil { + logError(testName, function, args, startTime, "", "ListBuckets failed", err) + return + } + + // Verify if previously created bucket is listed in list buckets. + bucketFound := false + for _, bucket := range buckets { + if bucket.Name == bucketName { + bucketFound = true + } + } + + // If bucket not found error out. + if !bucketFound { + logError(testName, function, args, startTime, "", "Bucket: "+bucketName+" not found", err) + return + } + + objectName := bucketName + "unique" + + // Generate data + buf := bytes.Repeat([]byte("f"), 1<<19) + + function = "PutObject(bucketName, objectName, reader, contentType)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "contentType": "", + } + + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-nolength", + "contentType": "binary/octet-stream", + } + + _, err = c.PutObject(context.Background(), bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Instantiate a done channel to close all listing. + doneCh := make(chan struct{}) + defer close(doneCh) + + objFound := false + isRecursive := true // Recursive is true. 
+ + function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: true}) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) + return + } + + objFound = false + isRecursive = true // Recursive is true. + function = "ListObjects()" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Prefix: objectName, Recursive: isRecursive}) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) + return + } + + incompObjNotFound := true + + function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) { + if objIncompl.Key != "" { + incompObjNotFound = false + break + } + } + if !incompObjNotFound { + logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) + return + } + + function = "GetObject(bucketName, objectName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + } + newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + newReadBytes, err := ioutil.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err) + return + } + newReader.Close() + + function = "FGetObject(bucketName, objectName, fileName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "fileName": fileName + "-f", + } + err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + + if err != nil { + logError(testName, function, args, startTime, "", "FGetObject failed", err) + return + } + + function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": "", + "expires": 3600 * time.Second, + } + if _, err = c.PresignedHeadObject(context.Background(), bucketName, "", 3600*time.Second, nil); err == nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject success", err) + return + } + + // Generate presigned HEAD object url. 
+ function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) + + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) + return + } + + transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) + if err != nil { + logError(testName, function, args, startTime, "", "DefaultTransport failed", err) + return + } + + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively canceled after 30secs + // with no response. + Timeout: 30 * time.Second, + Transport: transport, + } + + req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject request was incorrect", err) + return + } + + // Verify if presigned url works. + resp, err := httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) + return + } + if resp.StatusCode != http.StatusOK { + logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err) + return + } + if resp.Header.Get("ETag") == "" { + logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) + return + } + resp.Body.Close() + + function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": "", + "expires": 3600 * time.Second, + } + _, err = c.PresignedGetObject(context.Background(), bucketName, "", 3600*time.Second, nil) + if err == nil { + logError(testName, function, args, startTime, "", "PresignedGetObject success", err) + return + } + + // Generate presigned GET object url. + function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) + + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) + return + } + + // Verify if presigned url works. 
+ req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) + return + } + + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) + return + } + if resp.StatusCode != http.StatusOK { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+fmt.Sprintf("%d", resp.StatusCode), err) + return + } + newPresignedBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) + return + } + resp.Body.Close() + if !bytes.Equal(newPresignedBytes, buf) { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) + return + } + + // Set request parameters. + reqParams := make(url.Values) + reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"") + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + "reqParams": reqParams, + } + presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams) + + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) + return + } + + // Verify if presigned url works. + req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) + return + } + + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) + return + } + if resp.StatusCode != http.StatusOK { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+fmt.Sprintf("%d", resp.StatusCode), err) + return + } + newPresignedBytes, err = ioutil.ReadAll(resp.Body) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) + return + } + if !bytes.Equal(newPresignedBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch for presigned GET URL", err) + return + } + if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { + logError(testName, function, args, startTime, "", "wrong Content-Disposition received "+resp.Header.Get("Content-Disposition"), err) + return + } + + function = "PresignedPutObject(bucketName, objectName, expires)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": "", + "expires": 3600 * time.Second, + } + _, err = c.PresignedPutObject(context.Background(), bucketName, "", 3600*time.Second) + if err == nil { + logError(testName, function, args, startTime, "", "PresignedPutObject success", err) + return + } + + function = "PresignedPutObject(bucketName, objectName, expires)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-presigned", + "expires": 3600 * time.Second, + } + presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second) + + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) + return + } + + buf =
bytes.Repeat([]byte("g"), 1<<19) + + req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf)) + if err != nil { + logError(testName, function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err) + return + } + + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) + return + } + + newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject after PresignedPutObject failed", err) + return + } + + newReadBytes, err = ioutil.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err) + return + } + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch", err) + return + } + + function = "RemoveObject(bucketName, objectName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + } + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) + + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + args["objectName"] = objectName + "-f" + err = c.RemoveObject(context.Background(), bucketName, objectName+"-f", minio.RemoveObjectOptions{}) + + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + + args["objectName"] = objectName + "-nolength" + err = c.RemoveObject(context.Background(), bucketName, objectName+"-nolength", minio.RemoveObjectOptions{}) + + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + + args["objectName"] = objectName + "-presigned" + err = c.RemoveObject(context.Background(), bucketName, objectName+"-presigned", minio.RemoveObjectOptions{}) + + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + + function = "RemoveBucket(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + err = c.RemoveBucket(context.Background(), bucketName) + + if err != nil { + logError(testName, function, args, startTime, "", "RemoveBucket failed", err) + return + } + err = c.RemoveBucket(context.Background(), bucketName) + if err == nil { + logError(testName, function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err) + return + } + if err.Error() != "The specified bucket does not exist" { + logError(testName, function, args, startTime, "", "RemoveBucket failed", err) + return + } + + os.Remove(fileName) + os.Remove(fileName + "-f") + successLogger(testName, functionAll, args, startTime).Info() +} + +// Test for validating GetObject Reader* methods functioning when the +// object is modified in the object store. +func testGetObjectModified() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Instantiate new minio client object. 
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Make a new bucket. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload an object. + objectName := "myobject" + args["objectName"] = objectName + content := "helloworld" + _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"}) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) + return + } + + defer c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) + + reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err) + return + } + defer reader.Close() + + // Read a few bytes of the object. + b := make([]byte, 5) + n, err := reader.ReadAt(b, 0) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err) + return + } + + // Upload different contents to the same object while object is being read. + newContent := "goodbyeworld" + _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"}) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) + return + } + + // Confirm that a Stat() call in between doesn't change the Object's cached etag. + _, err = reader.Stat() + expectedError := "At least one of the pre-conditions you specified did not hold" + if err.Error() != expectedError { + logError(testName, function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err) + return + } + + // Read again only to find object contents have been modified since last read. + _, err = reader.ReadAt(b, int64(n)) + if err.Error() != expectedError { + logError(testName, function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test validates putObject to upload a file seeked at a given offset. 
+func testPutObjectUploadSeekedObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, fileToUpload, contentType)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileToUpload": "", + "contentType": "binary/octet-stream", + } + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Make a new bucket. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, c) + + var tempfile *os.File + + if fileName := getMintDataDirFilePath("datafile-100-kB"); fileName != "" { + tempfile, err = os.Open(fileName) + if err != nil { + logError(testName, function, args, startTime, "", "File open failed", err) + return + } + args["fileToUpload"] = fileName + } else { + tempfile, err = ioutil.TempFile("", "minio-go-upload-test-") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile create failed", err) + return + } + args["fileToUpload"] = tempfile.Name() + + // Generate 100kB data + if _, err = io.Copy(tempfile, getDataReader("datafile-100-kB")); err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + + defer os.Remove(tempfile.Name()) + + // Seek back to the beginning of the file. 
+ tempfile.Seek(0, 0) + } + var length = 100 * humanize.KiByte + objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) + args["objectName"] = objectName + + offset := length / 2 + if _, err = tempfile.Seek(int64(offset), 0); err != nil { + logError(testName, function, args, startTime, "", "TempFile seek failed", err) + return + } + + _, err = c.PutObject(context.Background(), bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + tempfile.Close() + + obj, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer obj.Close() + + n, err := obj.Seek(int64(offset), 0) + if err != nil { + logError(testName, function, args, startTime, "", "Seek failed", err) + return + } + if n != int64(offset) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(offset), n), err) + return + } + + _, err = c.PutObject(context.Background(), bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName+"getobject", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if st.Size != int64(length-offset) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid size returned, expected %d got %d", int64(length-offset), st.Size), err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests bucket re-create errors. +func testMakeBucketErrorV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + args := map[string]interface{}{ + "bucketName": "", + "region": "eu-west-1", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + region := "eu-west-1" + args["bucketName"] = bucketName + args["region"] = region + + // Make a new bucket in 'eu-west-1'.
+ if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil { + logError(testName, function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err) + return + } + // Verify valid error response from server. + if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && + minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { + logError(testName, function, args, startTime, "", "Invalid error returned by server", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test get object reader to not throw error on being closed twice. +func testGetObjectClosedTwiceV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + args := map[string]interface{}{ + "bucketName": "", + "region": "eu-west-1", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. 
+ bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+fmt.Sprintf("%d", bufSize)+" got "+fmt.Sprintf("%d", st.Size), err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object is already closed, should return error", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests FPutObject hidden contentType setting +func testFPutObjectV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObject(bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Make a temp file with 11*1024*1024 bytes of data. + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + + r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024)) + n, err := io.CopyN(file, r, 11*1024*1024) + if err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + if n != int64(11*1024*1024) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+fmt.Sprintf("%d", int64(11*1024*1024))+" got "+fmt.Sprintf("%d", n), err) + return + } + + // Close the file pro-actively for windows.
+ err = file.Close() + if err != nil { + logError(testName, function, args, startTime, "", "File close failed", err) + return + } + + // Set base object name + objectName := bucketName + "FPutObject" + args["objectName"] = objectName + args["fileName"] = file.Name() + + // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObject(context.Background(), bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + + // Perform FPutObject with no contentType provided (Expecting application/octet-stream) + args["objectName"] = objectName + "-Octet" + args["contentType"] = "" + + _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + + // Add extension to temp file name + fileName := file.Name() + err = os.Rename(fileName, fileName+".gtar") + if err != nil { + logError(testName, function, args, startTime, "", "Rename failed", err) + return + } + + // Perform FPutObject with no contentType provided (Expecting application/x-gtar) + args["objectName"] = objectName + "-GTar" + args["contentType"] = "" + args["fileName"] = fileName + ".gtar" + + _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + + // Check headers and sizes + rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + if rStandard.Size != 11*1024*1024 { + logError(testName, function, args, startTime, "", "Unexpected size", nil) + return + } + + if rStandard.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err) + return + } + + rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if rOctet.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err) + return + } + + if rOctet.Size != 11*1024*1024 { + logError(testName, function, args, startTime, "", "Unexpected size", nil) + return + } + + rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if rGTar.Size != 11*1024*1024 { + logError(testName, function, args, startTime, "", "Unexpected size", nil) + return + } + if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err) + return + } + + os.Remove(fileName + ".gtar") + successLogger(testName, function, args,
startTime).Info() +} + +// Tests various bucket supported formats. +func testMakeBucketRegionsV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + args := map[string]interface{}{ + "bucketName": "", + "region": "eu-west-1", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket in 'eu-west-1'. + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "eu-west-1"}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err) + return + } + + // Make a new bucket with '.' in its name, in 'us-west-2'. This + // request is internally staged into a path style instead of + // virtual host style. + if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: "us-west-2"}); err != nil { + args["bucketName"] = bucketName + ".withperiod" + args["region"] = "us-west-2" + logError(testName, function, args, startTime, "", "MakeBucket test with a bucket name with period, '.', failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName+".withperiod", c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests get object ReaderSeeker interface methods. +func testGetObjectReadSeekFunctionalV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data. + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer r.Close() + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+fmt.Sprintf("%d", int64(bufSize))+" got "+fmt.Sprintf("%d", st.Size), err) + return + } + + offset := int64(2048) + n, err := r.Seek(offset, 0) + if err != nil { + logError(testName, function, args, startTime, "", "Seek failed", err) + return + } + if n != offset { + logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+fmt.Sprintf("%d", offset)+" got "+fmt.Sprintf("%d", n), err) + return + } + n, err = r.Seek(0, 1) + if err != nil { + logError(testName, function, args, startTime, "", "Seek failed", err) + return + } + if n != offset { + logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+fmt.Sprintf("%d", offset)+" got "+fmt.Sprintf("%d", n), err) + return + } + _, err = r.Seek(offset, 2) + if err == nil { + logError(testName, function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err) + return + } + n, err = r.Seek(-offset, 2) + if err != nil { + logError(testName, function, args, startTime, "", "Seek failed", err) + return + } + if n != st.Size-offset { + logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+fmt.Sprintf("%d", st.Size-offset)+" got "+fmt.Sprintf("%d", n), err) + return + } + + var buffer1 bytes.Buffer + if _, err = io.CopyN(&buffer1, r, st.Size); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + } + if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + + // Seek again and read again.
+ n, err = r.Seek(offset-1, 0) + if err != nil { + logError(testName, function, args, startTime, "", "Seek failed", err) + return + } + if n != (offset - 1) { + logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+fmt.Sprintf("%d", offset-1)+" got "+fmt.Sprintf("%d", n), err) + return + } + + var buffer2 bytes.Buffer + if _, err = io.CopyN(&buffer2, r, st.Size); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + } + // Verify now lesser bytes. + if !bytes.Equal(buf[2047:], buffer2.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests get object ReaderAt interface methods. +func testGetObjectReadAtFunctionalV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer r.Close() + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+fmt.Sprintf("%d", bufSize)+" got "+fmt.Sprintf("%d", st.Size), err) + return + } + + offset := int64(2048) + + // Read directly + buf2 := make([]byte, 512) + buf3 := make([]byte, 512) + buf4 := make([]byte, 512) + + m, err := r.ReadAt(buf2, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf2) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+fmt.Sprintf("%d", len(buf2))+" got "+fmt.Sprintf("%d", m), err) + return + } + if !bytes.Equal(buf2, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf3, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf3) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+fmt.Sprintf("%d", len(buf3))+" got "+fmt.Sprintf("%d", m), err) + return + } + if !bytes.Equal(buf3, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf4, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf4) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+fmt.Sprintf("%d", len(buf4))+" got "+fmt.Sprintf("%d", m), err) + return + } + if !bytes.Equal(buf4, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + + buf5 := make([]byte, bufSize) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + if m != len(buf5) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+fmt.Sprintf("%d", len(buf5))+" got "+fmt.Sprintf("%d", m), err) + return + } + if !bytes.Equal(buf, buf5) { + logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) + return + } + + buf6 := make([]byte, bufSize+1) + // Read the whole object and beyond.
+ _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests copy object +func testCopyObjectV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, c) + + // Make a new bucket in 'us-east-1' (destination bucket). + err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName+"-copy", c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + // Check the various fields of source object against destination object. + objInfo, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + r.Close() + + // Copy Source + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + MatchETag: objInfo.ETag, + } + args["source"] = src + + // Set copy conditions. 
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName + "-copy",
+ Object: objectName + "-copy",
+ }
+ args["destination"] = dst
+
+ // Perform the Copy
+ _, err = c.CopyObject(context.Background(), dst, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ // Source object
+ r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ // Destination object
+ readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ // Check the various fields of source object against destination object.
+ objInfo, err = r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ objInfoCopy, err := readerCopy.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ if objInfo.Size != objInfoCopy.Size {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", objInfoCopy.Size, objInfo.Size), err)
+ return
+ }
+
+ // Close all the readers.
+ r.Close()
+ readerCopy.Close()
+
+ // CopyObject again but with wrong conditions
+ src = minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: objectName,
+ MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC),
+ NoMatchETag: objInfo.ETag,
+ }
+
+ // Perform the Copy which should fail
+ _, err = c.CopyObject(context.Background(), dst, src)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+func testComposeObjectErrorCasesWrapper(c *minio.Client) {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
+ args := map[string]interface{}{}
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Test that more than 10K source objects cannot be
+ // concatenated.
+ srcArr := [10001]minio.CopySrcOptions{}
+ srcSlice := srcArr[:]
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "object",
+ }
+
+ args["destination"] = dst
+ // Describe srcArr in args["sourceList"] instead of embedding it,
+ // to avoid logging 10,001 empty entries
+ args["sourceList"] = "source array of 10,001 elements"
+ if _, err := c.ComposeObject(context.Background(), dst, srcSlice...); err == nil {
+ logError(testName, function, args, startTime, "", "Expected error in ComposeObject", err)
+ return
+ } else if err.Error() != "There must be as least one and up to 10000 source objects." {
+ // The string above matches the library's error message verbatim.
+ logError(testName, function, args, startTime, "", "Got unexpected error", err)
+ return
+ }
+
+ // Create a source with invalid offset spec and check that
+ // error is returned:
+ // 1. Create the source object.
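+ // NOTE: for an object of size N the valid byte offsets are 0 through N-1;
+ // the badSrc options built further below set End to the object size itself,
+ // one past the last valid offset, which is what makes the segment invalid.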
+ const badSrcSize = 5 * 1024 * 1024 + buf := bytes.Repeat([]byte("1"), badSrcSize) + _, err = c.PutObject(context.Background(), bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + // 2. Set invalid range spec on the object (going beyond + // object size) + badSrc := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "badObject", + MatchRange: true, + Start: 1, + End: badSrcSize, + } + + // 3. ComposeObject call should fail. + if _, err := c.ComposeObject(context.Background(), dst, badSrc); err == nil { + logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err) + return + } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") { + logError(testName, function, args, startTime, "", "Got invalid error", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test expected error cases +func testComposeObjectErrorCasesV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + testComposeObjectErrorCasesWrapper(c) +} + +func testComposeMultipleSources(c *minio.Client) { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{ + "destination": "", + "sourceList": "", + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + // Make a new bucket in 'us-east-1' (source bucket). + err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload a small source object + const srcSize = 1024 * 1024 * 5 + buf := bytes.Repeat([]byte("1"), srcSize) + _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // We will append 10 copies of the object. + srcs := []minio.CopySrcOptions{} + for i := 0; i < 10; i++ { + srcs = append(srcs, minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObject", + }) + } + + // make the last part very small + srcs[9].MatchRange = true + + args["sourceList"] = srcs + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject", + } + args["destination"] = dst + + ui, err := c.ComposeObject(context.Background(), dst, srcs...) 
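+ // NOTE: srcs[9] enables MatchRange with the zero values Start=0 and End=0,
+ // i.e. a one-byte range, so the composed object is expected to contain
+ // nine full copies plus one byte: 9*srcSize+1 bytes in total.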
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ComposeObject failed", err)
+ return
+ }
+
+ if ui.Size != 9*srcSize+1 {
+ logError(testName, function, args, startTime, "", "ComposeObject returned unexpected size", err)
+ return
+ }
+
+ objProps, err := c.StatObject(context.Background(), bucketName, "dstObject", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ if objProps.Size != 9*srcSize+1 {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Size mismatched! Expected %d got %d", 9*srcSize+1, objProps.Size), err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Test ComposeObject with multiple sources using a V2 signature client
+func testCompose10KSourcesV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+
+ testComposeMultipleSources(c)
+}
+
+func testEncryptedEmptyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, objectSize, opts)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"object"))
+
+ // 1. create an sse-c encrypted object to copy by uploading
+ const srcSize = 0
+ var buf []byte // Empty buffer
+ args["objectName"] = "object"
+ _, err = c.PutObject(context.Background(), bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ // 2. Test CopyObject for an empty object
+ src := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "object",
+ Encryption: sse,
+ }
+
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "new-object",
+ Encryption: sse,
+ }
+
+ if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
+ function = "CopyObject(dst, src)"
+ logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ // 3.
Test Key rotation + newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"new-object")) + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: "new-object", + Encryption: sse, + } + + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "new-object", + Encryption: newSSE, + } + + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + function = "CopyObject(dst, src)" + logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject with key rotation failed", err) + return + } + + // 4. Download the object. + reader, err := c.GetObject(context.Background(), bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer reader.Close() + + decBytes, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, map[string]interface{}{}, startTime, "", "Downloaded object doesn't match the empty encrypted object", err) + return + } + + delete(args, "objectName") + successLogger(testName, function, args, startTime).Info() +} + +func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, sseDst encrypt.ServerSide) { + // initialize logging params + startTime := time.Now() + testName := getFuncNameLoc(2) + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + var srcEncryption, dstEncryption encrypt.ServerSide + + // Make a new bucket in 'us-east-1' (source bucket). + err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // 1. create an sse-c encrypted object to copy by uploading + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB + _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ServerSideEncryption: sseSrc, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + if sseSrc != nil && sseSrc.Type() != encrypt.S3 { + srcEncryption = sseSrc + } + + // 2. copy object and change encryption key + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObject", + Encryption: srcEncryption, + } + args["source"] = src + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject", + Encryption: sseDst, + } + args["destination"] = dst + + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + if sseDst != nil && sseDst.Type() != encrypt.S3 { + dstEncryption = sseDst + } + // 3. 
get copied object and check if content is equal + coreClient := minio.Core{c} + reader, _, _, err := coreClient.GetObject(context.Background(), bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + decBytes, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) + return + } + reader.Close() + + // Test key rotation for source object in-place. + var newSSE encrypt.ServerSide + if sseSrc != nil && sseSrc.Type() == encrypt.SSEC { + newSSE = encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"srcObject")) // replace key + } + if sseSrc != nil && sseSrc.Type() == encrypt.S3 { + newSSE = encrypt.NewSSE() + } + if newSSE != nil { + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "srcObject", + Encryption: newSSE, + } + args["destination"] = dst + + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Get copied object and check if content is equal + reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + decBytes, err = ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) + return + } + reader.Close() + + // Test in-place decryption. 
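+ // NOTE: the destination below carries no Encryption while the source still
+ // names the current SSE-C key, so this server-side copy rewrites srcObject
+ // in place as a plain, unencrypted object, which the plain GetObject that
+ // follows then verifies.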
+ dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "srcObject", + } + args["destination"] = dst + + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObject", + Encryption: newSSE, + } + args["source"] = src + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject Key rotation failed", err) + return + } + } + + // Get copied decrypted object and check if content is equal + reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer reader.Close() + + decBytes, err = ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test encrypted copy object +func testUnencryptedToSSECCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, nil, sseDst) +} + +// Test encrypted copy object +func testUnencryptedToSSES3CopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + var sseSrc encrypt.ServerSide + sseDst := encrypt.NewSSE() + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testUnencryptedToUnencryptedCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + var sseSrc, sseDst encrypt.ServerSide + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSECToSSECCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) + sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSECToSSES3CopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) + sseDst := encrypt.NewSSE() + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSECToUnencryptedCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) + var sseDst encrypt.ServerSide + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSES3ToSSECCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.NewSSE() + sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSES3ToSSES3CopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.NewSSE() + sseDst := encrypt.NewSSE() + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSES3ToUnencryptedCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.NewSSE() + var sseDst encrypt.ServerSide + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedCopyObjectV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) + sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +func testDecryptedCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + bucketName, objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-"), "object" + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + encryption := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(bytes.Repeat([]byte("a"), 1024*1024)), 1024*1024, minio.PutObjectOptions{ + ServerSideEncryption: encryption, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + Encryption: encrypt.SSECopy(encryption), + } + args["source"] = src + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "decrypted-" + objectName, + } + args["destination"] = dst + + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + if _, err = c.GetObject(context.Background(), bucketName, "decrypted-"+objectName, minio.GetObjectOptions{}); err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + +func testSSECMultipartEncryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. 
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 6MB of data + buf := bytes.Repeat([]byte("abcdef"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + + // Upload a 6MB object using multipart mechanism + uploadID, err := c.NewMultipartUpload(context.Background(), bucketName, objectName, minio.PutObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + var completeParts []minio.CompletePart + + part, err := c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, bytes.NewReader(buf[:5*1024*1024]), 5*1024*1024, "", "", srcencryption) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) + return + } + completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) + + part, err = c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 2, bytes.NewReader(buf[5*1024*1024:]), 1024*1024, "", "", srcencryption) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) + return + } + completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, completeParts) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err = c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
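+ // NOTE: the CopyObjectPart calls below take a start offset and a length;
+ // 0 and -1 copy the entire 6MB source object into a part, while 0 and 1
+ // copy a single byte. The SSE-C copy-source headers (to decrypt the source)
+ // and the destination encryption headers are marshaled into the metadata
+ // map so that they accompany each part-copy request.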
+ metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = objInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err = c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (6*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 6*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 6*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 6MB", err) + return + } + + getOpts.SetRange(6*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 6*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:6*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 6MB", err) + return + } + if getBuf[6*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation +func testSSECEncryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + putmetadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + ServerSideEncryption: srcencryption, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
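+ // NOTE: unlike the previous test, the source object here was uploaded with
+ // a single Core.PutObject call rather than a multipart upload; the
+ // part-copy sequence below is otherwise identical.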
+ metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation for SSEC encrypted to unencrypted copy +func testSSECEncryptedToUnencryptedCopyPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + ServerSideEncryption: srcencryption, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + var dstencryption encrypt.ServerSide + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation for SSEC encrypted to SSE-S3 encrypted copy +func testSSECEncryptedToSSES3CopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + putmetadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + ServerSideEncryption: srcencryption, + } + + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.NewSSE() + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + dstencryption.Marshal(header) + + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation for unencrypted to SSEC encryption copy part +func testUnencryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + putmetadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy +func testUnencryptedToUnencryptedCopyPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + putmetadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
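+ // Pinning "x-amz-copy-source-if-match" to the source ETag below makes each CopyObjectPart conditional: the server rejects the copy + // if the source object has been overwritten since the PutObject above returned.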
+ metadata := make(map[string]string) + header := make(http.Header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy +func testUnencryptedToSSES3CopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.NewSSE() + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
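+ // dstencryption.Marshal(header) emits the SSE-S3 request header (X-Amz-Server-Side-Encryption: AES256); flattening it into the + // metadata map below forwards it with every CopyObjectPart request.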
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation for SSE-S3 to SSEC encryption copy part +func testSSES3EncryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcEncryption := encrypt.NewSSE() + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + ServerSideEncryption: srcEncryption, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
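+ // Only the destination encryption is marshalled below: an SSE-S3 source is decrypted by the server transparently, so no + // copy-source key headers are required (an SSE-C source would additionally need the encrypt.SSECopy headers used earlier).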
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation for SSE-S3 encrypted to unencrypted copy part +func testSSES3EncryptedToUnencryptedCopyPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + srcEncryption := encrypt.NewSSE() + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + ServerSideEncryption: srcEncryption, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`.
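+ // No encryption headers are needed for this case: the server decrypts the SSE-S3 source during the copy and stores the + // destination unencrypted, so the header loop below is effectively a no-op kept for symmetry with the other copy-part tests.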
+ metadata := make(map[string]string) + header := make(http.Header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation for SSE-S3 to SSE-S3 encrypted copy part +func testSSES3EncryptedToSSES3CopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + srcEncryption := encrypt.NewSSE() + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + ServerSideEncryption: srcEncryption, + } + + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.NewSSE() + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`.
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
+} +func testUserMetadataCopying() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // c.TraceOn(os.Stderr) + testUserMetadataCopyingWrapper(c) +} + +func testUserMetadataCopyingWrapper(c *minio.Client) { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + // Make a new bucket in 'us-east-1' (source bucket). + err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + fetchMeta := func(object string) (h http.Header) { + objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + h = make(http.Header) + for k, vs := range objInfo.Metadata { + if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { + h.Add(k, vs[0]) + } + } + return h + } + + // 1. create a client encrypted object to copy by uploading + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB + metadata := make(http.Header) + metadata.Set("x-amz-meta-myheader", "myvalue") + m := make(map[string]string) + m["x-amz-meta-myheader"] = "myvalue" + _, err = c.PutObject(context.Background(), bucketName, "srcObject", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectWithMetadata failed", err) + return + } + if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + // 2. create source + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObject", + } + + // 2.1 create destination with metadata set + dst1 := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject-1", + UserMetadata: map[string]string{"notmyheader": "notmyvalue"}, + ReplaceMetadata: true, + } + + // 3. Check that copying to an object with metadata set resets + // the headers on the copy. + args["source"] = src + args["destination"] = dst1 + _, err = c.CopyObject(context.Background(), dst1, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + expectedHeaders := make(http.Header) + expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") + if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + // 4. create destination with no metadata set and same source + dst2 := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject-2", + } + + // 5. 
Check that copying to an object with no metadata set, + // copies metadata. + args["source"] = src + args["destination"] = dst2 + _, err = c.CopyObject(context.Background(), dst2, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + expectedHeaders = metadata + if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + // 6. Compose a pair of sources. + dst3 := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject-3", + ReplaceMetadata: true, + } + + function = "ComposeObject(destination, sources)" + args["source"] = []minio.CopySrcOptions{src, src} + args["destination"] = dst3 + _, err = c.ComposeObject(context.Background(), dst3, src, src) + if err != nil { + logError(testName, function, args, startTime, "", "ComposeObject failed", err) + return + } + + // Check that no headers are copied in this case + if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + // 7. Compose a pair of sources with dest user metadata set. + dst4 := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject-4", + UserMetadata: map[string]string{"notmyheader": "notmyvalue"}, + ReplaceMetadata: true, + } + + function = "ComposeObject(destination, sources)" + args["source"] = []minio.CopySrcOptions{src, src} + args["destination"] = dst4 + _, err = c.ComposeObject(context.Background(), dst4, src, src) + if err != nil { + logError(testName, function, args, startTime, "", "ComposeObject failed", err) + return + } + + // Check that only the metadata from the destination options is set in this case + expectedHeaders = make(http.Header) + expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") + if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testUserMetadataCopyingV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // c.TraceOn(os.Stderr) + testUserMetadataCopyingWrapper(c) +} + +func testStorageClassMetadataPutObject() { + // initialize logging params + startTime := time.Now() + function := "testStorageClassMetadataPutObject()" + args := map[string]interface{}{} + testName := getFuncName() + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + fetchMeta := func(object string) (h http.Header) { + objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + h = make(http.Header) + for k, vs := range objInfo.Metadata { + if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") { + for _, v := range vs { + h.Add(k, v) + } + } + } + return h + } + + metadata := make(http.Header) + metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") + + emptyMetadata := make(http.Header) + + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB + + _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Get the returned metadata + returnedMeta := fetchMeta("srcObjectRRSClass") + + // The response metadata should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways) + if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + metadata = make(http.Header) + metadata.Set("x-amz-storage-class", "STANDARD") + + _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClass")) { + logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testStorageClassInvalidMetadataPutObject() { + // initialize logging params + startTime := time.Now() + function := "testStorageClassInvalidMetadataPutObject()" + args := map[string]interface{}{} + testName := getFuncName() + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB + + _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "INVALID_STORAGE_CLASS"}) + if err == nil { + logError(testName, function, args, startTime, "", "PutObject with invalid storage class passed, was expected to fail", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testStorageClassMetadataCopyObject() { + // initialize logging params + startTime := time.Now() + function := "testStorageClassMetadataCopyObject()" + args := map[string]interface{}{} + testName := getFuncName() + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + fetchMeta := func(object string) (h http.Header) { + objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{}) + args["bucket"] = bucketName + args["object"] = object + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + h = make(http.Header) + for k, vs := range objInfo.Metadata { + if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") { + for _, v := range vs { + h.Add(k, v) + } + } + } + return h + } + + metadata := make(http.Header) + metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") + + emptyMetadata := make(http.Header) + + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) + + // Put an object with RRS Storage class + _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Make server side copy of object uploaded in previous step + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObjectRRSClass", + } + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "srcObjectRRSClassCopy", + } + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed on RRS", err) + return + } + + // Get the returned metadata + returnedMeta := fetchMeta("srcObjectRRSClassCopy") + + // The response metadata should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways) + if !reflect.DeepEqual(metadata, returnedMeta) &&
!reflect.DeepEqual(emptyMetadata, returnedMeta) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + metadata = make(http.Header) + metadata.Set("x-amz-storage-class", "STANDARD") + + // Put an object with Standard Storage class + _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Make server side copy of object uploaded in previous step + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObjectSSClass", + } + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "srcObjectSSClassCopy", + } + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed on SS", err) + return + } + // Fetch the meta data of copied object + if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClassCopy")) { + logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test put object with size -1 byte object. +func testPutObjectNoLengthV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "size": -1, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + objectName := bucketName + "unique" + args["objectName"] = objectName + + bufSize := dataFileMap["datafile-129-MB"] + var reader = getDataReader("datafile-129-MB") + defer reader.Close() + args["size"] = bufSize + + // Upload an object. 
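+ // A size of -1 below tells PutObject the length is unknown, so the client uploads the 129MB datafile in parts and the + // final size is only known once the upload completes; the StatObject that follows verifies it.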
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, -1, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size %d got %d", bufSize, st.Size), err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test put objects of unknown size. +func testPutObjectsUnknownV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "size": "", + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Issues are revealed by trying to upload multiple files of unknown size + // sequentially (on 4GB machines) + for i := 1; i <= 4; i++ { + // Simulate that we could be receiving byte slices of data that we want + // to upload as a file + rpipe, wpipe := io.Pipe() + defer rpipe.Close() + go func() { + b := []byte("test") + wpipe.Write(b) + wpipe.Close() + }() + + // Upload the object. + objectName := fmt.Sprintf("%sunique%d", bucketName, i) + args["objectName"] = objectName + + ui, err := c.PutObject(context.Background(), bucketName, objectName, rpipe, -1, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) + return + } + + if ui.Size != 4 { + logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size %d got %d", 4, ui.Size), nil) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObjectStreaming failed", err) + return + } + + if st.Size != int64(4) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size %d got %d", 4, st.Size), err) + return + } + + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test put object with 0 byte object.
+func testPutObject0ByteV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "size": 0, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + objectName := bucketName + "unique" + args["objectName"] = objectName + args["opts"] = minio.PutObjectOptions{} + + // Upload an object. + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObjectWithSize failed", err) + return + } + if st.Size != 0 { + logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size 0 but got %d", st.Size), err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test expected error cases +func testComposeObjectErrorCases() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + testComposeObjectErrorCasesWrapper(c) +} + +// Test concatenating multiple 10K objects V4 +func testCompose10KSources() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + testComposeMultipleSources(c) +} + +// Tests comprehensive list of all methods.
+func testFunctionalV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "testFunctionalV2()"
+	functionAll := ""
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+		return
+	}
+
+	// Enable to debug
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	location := "us-east-1"
+	// Make a new bucket.
+	function = "MakeBucket(bucketName, location)"
+	functionAll = "MakeBucket(bucketName, location)"
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+		"location":   location,
+	}
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Generate a random file name.
+	fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	file, err := os.Create(fileName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "file create failed", err)
+		return
+	}
+	for i := 0; i < 3; i++ {
+		buf := make([]byte, rand.Intn(1<<19))
+		_, err = file.Write(buf)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "file write failed", err)
+			return
+		}
+	}
+	file.Close()
+
+	// Verify if bucket exists and you have access.
+	var exists bool
+	function = "BucketExists(bucketName)"
+	functionAll += ", " + function
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+	}
+	exists, err = c.BucketExists(context.Background(), bucketName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "BucketExists failed", err)
+		return
+	}
+	if !exists {
+		logError(testName, function, args, startTime, "", "Could not find existing bucket "+bucketName, err)
+		return
+	}
+
+	// Make the bucket 'public read/write'.
+	function = "SetBucketPolicy(bucketName, bucketPolicy)"
+	functionAll += ", " + function
+
+	readWritePolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:ListBucketMultipartUploads", "s3:ListBucket"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `"],"Sid": ""}]}`
+
+	args = map[string]interface{}{
+		"bucketName":   bucketName,
+		"bucketPolicy": readWritePolicy,
+	}
+	err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
+		return
+	}
+
+	// List all buckets.
+	function = "ListBuckets()"
+	functionAll += ", " + function
+	args = nil
+	buckets, err := c.ListBuckets(context.Background())
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ListBuckets failed", err)
+		return
+	}
+	if len(buckets) == 0 {
+		logError(testName, function, args, startTime, "", "List buckets cannot be empty", err)
+		return
+	}
+
+	// Verify if previously created bucket is listed in list buckets.
+	bucketFound := false
+	for _, bucket := range buckets {
+		if bucket.Name == bucketName {
+			bucketFound = true
+		}
+	}
+
+	// If bucket not found error out.
+	if !bucketFound {
+		logError(testName, function, args, startTime, "", "Bucket "+bucketName+" not found", err)
+		return
+	}
+
+	objectName := bucketName + "unique"
+
+	// Generate data
+	buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19))
+
+	args = map[string]interface{}{
+		"bucketName":  bucketName,
+		"objectName":  objectName,
+		"contentType": "",
+	}
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+	if st.Size != int64(len(buf)) {
+		logError(testName, function, args, startTime, "", "Expected uploaded object length "+strconv.Itoa(len(buf))+" got "+strconv.FormatInt(st.Size, 10), err)
+		return
+	}
+
+	objectNameNoLength := objectName + "-nolength"
+	args["objectName"] = objectNameNoLength
+	_, err = c.PutObject(context.Background(), bucketName, objectNameNoLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+	st, err = c.StatObject(context.Background(), bucketName, objectNameNoLength, minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+	if st.Size != int64(len(buf)) {
+		logError(testName, function, args, startTime, "", "Expected uploaded object length "+strconv.Itoa(len(buf))+" got "+strconv.FormatInt(st.Size, 10), err)
+		return
+	}
+
+	// Instantiate a done channel to close all listing.
+	doneCh := make(chan struct{})
+	defer close(doneCh)
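+	// ListObjects returns a receive-only channel of ObjectInfo; the intended
+	// pattern, used below, is to range over the channel until it closes and
+	// to check the Err field of every entry. A minimal consumer looks roughly
+	// like (names here are illustrative):
+	//
+	//	for obj := range c.ListObjects(ctx, bucket, minio.ListObjectsOptions{Prefix: prefix}) {
+	//		if obj.Err != nil {
+	//			return obj.Err // listing failed part-way through
+	//		}
+	//		fmt.Println(obj.Key)
+	//	}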
+ function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: isRecursive}) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + logError(testName, function, args, startTime, "", "Could not find existing object "+objectName, err) + return + } + + incompObjNotFound := true + function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) { + if objIncompl.Key != "" { + incompObjNotFound = false + break + } + } + if !incompObjNotFound { + logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) + return + } + + function = "GetObject(bucketName, objectName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + } + newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + newReadBytes, err := ioutil.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + newReader.Close() + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch", err) + return + } + + function = "FGetObject(bucketName, objectName, fileName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "fileName": fileName + "-f", + } + err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FgetObject failed", err) + return + } + + // Generate presigned HEAD object url. + function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) + return + } + + transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) + if err != nil { + logError(testName, function, args, startTime, "", "DefaultTransport failed", err) + return + } + + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively canceled after 30secs + // with no response. + Timeout: 30 * time.Second, + Transport: transport, + } + + req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err) + return + } + + // Verify if presigned url works. 
+	// Verify if presigned url works.
+	resp, err := httpClient.Do(req)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err)
+		return
+	}
+	if resp.StatusCode != http.StatusOK {
+		logError(testName, function, args, startTime, "", "PresignedHeadObject URL returns status "+strconv.Itoa(resp.StatusCode), err)
+		return
+	}
+	if resp.Header.Get("ETag") == "" {
+		logError(testName, function, args, startTime, "", "Got empty ETag", err)
+		return
+	}
+	resp.Body.Close()
+
+	// Generate presigned GET object url.
+	function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
+	functionAll += ", " + function
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+		"objectName": objectName,
+		"expires":    3600 * time.Second,
+	}
+	presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+		return
+	}
+
+	// Verify if presigned url works.
+	req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
+		return
+	}
+
+	resp, err = httpClient.Do(req)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+		return
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+strconv.Itoa(resp.StatusCode), err)
+		return
+	}
+	newPresignedBytes, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+	resp.Body.Close()
+	if !bytes.Equal(newPresignedBytes, buf) {
+		logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+		return
+	}
+
+	// Set request parameters.
+	reqParams := make(url.Values)
+	reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+	// Generate presigned GET object url.
+	args["reqParams"] = reqParams
+	presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+		return
+	}
+
+	// Verify if presigned url works.
+	req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
+		return
+	}
+
+	resp, err = httpClient.Do(req)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+		return
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+strconv.Itoa(resp.StatusCode), err)
+		return
+	}
+	newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+	if !bytes.Equal(newPresignedBytes, buf) {
+		logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+		return
+	}
+	// Verify content disposition.
+ if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { + logError(testName, function, args, startTime, "", "wrong Content-Disposition received ", err) + return + } + + function = "PresignedPutObject(bucketName, objectName, expires)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-presigned", + "expires": 3600 * time.Second, + } + presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) + return + } + + // Generate data more than 32K + buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) + + req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf)) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) + return + } + + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) + return + } + + function = "GetObject(bucketName, objectName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-presigned", + } + newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + newReadBytes, err = ioutil.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + newReader.Close() + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch", err) + return + } + + os.Remove(fileName) + os.Remove(fileName + "-f") + successLogger(testName, functionAll, args, startTime).Info() +} + +// Test get object with GetObject with context +func testGetObjectContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(ctx, bucketName, objectName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-33-kB"]
+	var reader = getDataReader("datafile-33-kB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+	args["ctx"] = ctx
+	cancel()
+
+	r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err)
+		return
+	}
+
+	if _, err = r.Stat(); err == nil {
+		logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err)
+		return
+	}
+	r.Close()
+
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	args["ctx"] = ctx
+	defer cancel()
+
+	// Read the data back
+	r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "object Stat call failed", err)
+		return
+	}
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", "Number of bytes in stat does not match: want "+strconv.Itoa(bufSize)+", got "+strconv.FormatInt(st.Size, 10), err)
+		return
+	}
+	if err := r.Close(); err != nil {
+		logError(testName, function, args, startTime, "", "object Close() call failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test get object with FGetObject with a user provided context
+func testFGetObjectContext() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "FGetObject(ctx, bucketName, objectName, fileName)"
+	args := map[string]interface{}{
+		"ctx":        "",
+		"bucketName": "",
+		"objectName": "",
+		"fileName":   "",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
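+	// Unlike GetObject above, which is lazy and only surfaces a cancelled
+	// context on the first Stat or Read, FGetObject performs the whole
+	// transfer eagerly, so an already-expired context makes the call itself
+	// return an error. The short-timeout case below relies on exactly that;
+	// roughly (names here are illustrative):
+	//
+	//	err := c.FGetObject(expiredCtx, bucket, object, path, minio.GetObjectOptions{})
+	//	if err == nil {
+	//		// unexpected: the deadline had already passed
+	//	}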
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-1-MB"]
+	var reader = getDataReader("datafile-1-MB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+	args["ctx"] = ctx
+	defer cancel()
+
+	fileName := "tempfile-context"
+	args["fileName"] = fileName
+	// Read the data back
+	err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
+	if err == nil {
+		logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err)
+		return
+	}
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	defer cancel()
+
+	// Read the data back
+	err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FGetObject with long timeout failed", err)
+		return
+	}
+	if err = os.Remove(fileName + "-fcontext"); err != nil {
+		logError(testName, function, args, startTime, "", "Remove file failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test GetObject with ranged reads and a user provided context
+func testGetObjectRanges() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(ctx, bucketName, objectName, fileName)"
+	args := map[string]interface{}{
+		"ctx":        "",
+		"bucketName": "",
+		"objectName": "",
+		"fileName":   "",
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	rng := rand.NewSource(time.Now().UnixNano())
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rng, "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-129-MB"]
+	var reader = getDataReader("datafile-129-MB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rng, "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Read the data back
+	tests := []struct {
+		start int64
+		end   int64
+	}{
+		{
+			start: 1024,
+			end:   1024 + 1<<20,
+		},
+		{
+			start: 20e6,
+			end:   20e6 + 10000,
+		},
+		{
+			start: 40e6,
+			end:   40e6 + 10000,
+		},
+		{
+			start: 60e6,
+			end:   60e6 + 10000,
+		},
+		{
+			start: 80e6,
+			end:   80e6 + 10000,
+		},
+		{
+			start: 120e6,
+			end:   int64(bufSize),
+		},
+	}
+	for _, test := range tests {
+		wantRC := getDataReader("datafile-129-MB")
+		io.CopyN(ioutil.Discard, wantRC, test.start)
+		want := mustCrcReader(io.LimitReader(wantRC, test.end-test.start+1))
+		opts := minio.GetObjectOptions{}
+		opts.SetRange(test.start, test.end)
+		args["opts"] = fmt.Sprintf("%+v", test)
+		obj, err := c.GetObject(ctx, bucketName, objectName, opts)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "GetObject with range failed", err)
+			return
+		}
+		err = crcMatches(obj, want)
+		if err != nil {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("GetObject offset %d -> %d", test.start, test.end), err)
+			return
+		}
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test get object ACLs with GetObjectACL with custom provided context
+func testGetObjectACLContext() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObjectACL(ctx, bucketName, objectName)"
+	args := map[string]interface{}{
+		"ctx":        "",
+		"bucketName": "",
+		"objectName": "",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// skipping region functional tests for non s3 runs
+	if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
+		ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
+		return
+	}
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-1-MB"]
+	var reader = getDataReader("datafile-1-MB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	// Add metadata to set a canned ACL
+	metaData := map[string]string{
+		"X-Amz-Acl": "public-read-write",
+	}
+
+	_, err = c.PutObject(context.Background(), bucketName,
+		objectName, reader, int64(bufSize),
+		minio.PutObjectOptions{
+			ContentType:  "binary/octet-stream",
+			UserMetadata: metaData,
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	args["ctx"] = ctx
+	defer cancel()
+
+	// Read the data back
+	objectInfo, getObjectACLErr := c.GetObjectACL(ctx, bucketName, objectName)
+	if getObjectACLErr != nil {
+		logError(testName, function, args, startTime, "", "GetObjectACL failed", getObjectACLErr)
+		return
+	}
+
+	s, ok := objectInfo.Metadata["X-Amz-Acl"]
+	if !ok {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Acl\"", nil)
+		return
+	}
+
+	if len(s) != 1 {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" canned acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
+		return
+	}
+
+	if s[0] != "public-read-write" {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"public-read-write\" but got "+fmt.Sprintf("%q", s[0]), nil)
+		return
+	}
+
+	bufSize = dataFileMap["datafile-1-MB"]
+	var reader2 = getDataReader("datafile-1-MB")
+	defer reader2.Close()
+	// Save the data
+	objectName = randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	// Add metadata to set ACL grant headers
+	metaData = map[string]string{
+		"X-Amz-Grant-Read":  "id=fooread@minio.go",
+		"X-Amz-Grant-Write": "id=foowrite@minio.go",
+	}
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader2, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
+	args["ctx"] = ctx
+	defer cancel()
+
+	// Read the data back
+	objectInfo, getObjectACLErr = c.GetObjectACL(ctx, bucketName, objectName)
+	if getObjectACLErr != nil {
+		logError(testName, function, args, startTime, "", "GetObjectACL failed", getObjectACLErr)
+		return
+	}
+
+	if len(objectInfo.Metadata) != 3 {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail expected \"3\" ACLs but got "+fmt.Sprintf(`"%d"`, len(objectInfo.Metadata)), nil)
+		return
+	}
+
+	s, ok = objectInfo.Metadata["X-Amz-Grant-Read"]
+	if !ok {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Read\"", nil)
+		return
+	}
+
+	if len(s) != 1 {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
+		return
+	}
+
+	if s[0] != "fooread@minio.go" {
fail \"X-Amz-Grant-Read\" acl expected \"fooread@minio.go\" got "+fmt.Sprintf("%q", s), nil) + return + } + + s, ok = objectInfo.Metadata["X-Amz-Grant-Write"] + if !ok { + logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Write\"", nil) + return + } + + if len(s) != 1 { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) + return + } + + if s[0] != "foowrite@minio.go" { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"foowrite@minio.go\" got "+fmt.Sprintf("%q", s), nil) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test validates putObject with context to see if request cancellation is honored for V2. +func testPutObjectContextV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(ctx, bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "size": "", + "opts": "", + } + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Make a new bucket. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, c) + bufSize := dataFileMap["datatfile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) + args["objectName"] = objectName + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + args["ctx"] = ctx + args["size"] = bufSize + defer cancel() + + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject with short timeout failed", err) + return + } + + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + args["ctx"] = ctx + + defer cancel() + reader = getDataReader("datafile-33-kB") + defer reader.Close() + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() + +} + +// Test get object with GetObject with custom context +func testGetObjectContextV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(ctx, bucketName, objectName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + } + // Seed random based on current 
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-33-kB"]
+	var reader = getDataReader("datafile-33-kB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject call failed", err)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+	args["ctx"] = ctx
+	cancel()
+
+	r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err)
+		return
+	}
+	if _, err = r.Stat(); err == nil {
+		logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err)
+		return
+	}
+	r.Close()
+
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	defer cancel()
+
+	// Read the data back
+	r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject shouldn't fail on longer timeout", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "object Stat call failed", err)
+		return
+	}
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+strconv.Itoa(bufSize)+" got "+strconv.FormatInt(st.Size, 10), err)
+		return
+	}
+	if err := r.Close(); err != nil {
+		logError(testName, function, args, startTime, "", "object Close() call failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test get object with FGetObject with custom context
+func testFGetObjectContextV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "FGetObject(ctx, bucketName, objectName, fileName)"
+	args := map[string]interface{}{
+		"ctx":        "",
+		"bucketName": "",
+		"objectName": "",
+		"fileName":   "",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-1-MB"]
+	var reader = getDataReader("datafile-1-MB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject call failed", err)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+	args["ctx"] = ctx
+	defer cancel()
+
+	fileName := "tempfile-context"
+	args["fileName"] = fileName
+
+	// Read the data back
+	err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
+	if err == nil {
+		logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err)
+		return
+	}
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	defer cancel()
+
+	// Read the data back
+	err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FGetObject call shouldn't fail on long timeout", err)
+		return
+	}
+
+	if err = os.Remove(fileName + "-fcontext"); err != nil {
+		logError(testName, function, args, startTime, "", "Remove file failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test listing objects with the V1 and V2 APIs
+func testListObjects() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "ListObjects(bucketName, objectPrefix, recursive, doneCh)"
+	args := map[string]interface{}{
+		"bucketName":   "",
+		"objectPrefix": "",
+		"recursive":    "true",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	testObjects := []struct {
+		name         string
+		storageClass string
+	}{
+		// Special characters
+		{"foo bar", "STANDARD"},
+		{"foo-%", "STANDARD"},
+		{"random-object-1", "STANDARD"},
+		{"random-object-2", "REDUCED_REDUNDANCY"},
+	}
+
+	for i, object := range testObjects {
+		bufSize := dataFileMap["datafile-33-kB"]
+		var reader = getDataReader("datafile-33-kB")
+		defer reader.Close()
+		_, err = c.PutObject(context.Background(), bucketName, object.name, reader, int64(bufSize),
+			minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: object.storageClass})
+		if err != nil {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("PutObject %d call failed", i+1), err)
+			return
+		}
+	}
+
+	testList := func(listFn func(context.Context, string, minio.ListObjectsOptions) <-chan minio.ObjectInfo, bucket string, opts minio.ListObjectsOptions) {
+		var objCursor int
+
+		// check for object name and storage-class from listing object result
+		for objInfo := range listFn(context.Background(), bucket, opts) {
+			if objInfo.Err != nil {
+				logError(testName, function, args, startTime, "", "ListObjects failed unexpectedly", objInfo.Err)
+				return
+			}
+			if objInfo.Key != testObjects[objCursor].name {
+				logError(testName, function, args, startTime, "", "ListObjects does not return expected object name", nil)
+				return
+			}
+			if objInfo.StorageClass != testObjects[objCursor].storageClass {
+				// Ignored as Gateways (Azure/GCS etc) won't return storage class
+				ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected storage class").Info()
+			}
+			objCursor++
+		}
+
+		if objCursor != len(testObjects) {
+			logError(testName, function, args, startTime, "", "ListObjects returned unexpected number of items", errors.New("unexpected number of objects listed"))
+			return
+		}
+	}
+
+	testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, UseV1: true})
+	testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true})
+	testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, WithMetadata: true})
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test deleting multiple objects with object retention set in Governance mode
+func testRemoveObjects() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "RemoveObjects(bucketName, objectsCh, opts)"
+	args := map[string]interface{}{
+		"bucketName":   "",
+		"objectPrefix": "",
+		"recursive":    "true",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:  credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
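+	// The flow below exercises object locking: the bucket is created with
+	// ObjectLocking enabled, the object gets a Governance-mode retention date
+	// in the future, and deletion is attempted twice. Without GovernanceBypass
+	// every removal must fail; with the bypass set, none may, roughly:
+	//
+	//	opts := minio.RemoveObjectsOptions{GovernanceBypass: true}
+	//	for rErr := range c.RemoveObjects(ctx, bucket, objectsCh, opts) {
+	//		// any rErr received here is a genuine failure
+	//	}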
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	bufSize := dataFileMap["datafile-129-MB"]
+	var reader = getDataReader("datafile-129-MB")
+	defer reader.Close()
+
+	n, err := c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
+	if err != nil {
+		log.Fatalln(err)
+	}
+	log.Println("Uploaded", objectName, "of size:", n.Size, "to bucket:", bucketName, "successfully.")
+
+	// Replace with smaller...
+	bufSize = dataFileMap["datafile-10-kB"]
+	reader = getDataReader("datafile-10-kB")
+	defer reader.Close()
+
+	n, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
+	if err != nil {
+		log.Fatalln(err)
+	}
+	log.Println("Uploaded", objectName, "of size:", n.Size, "to bucket:", bucketName, "successfully.")
+
+	t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC)
+	m := minio.RetentionMode(minio.Governance)
+	opts := minio.PutObjectRetentionOptions{
+		GovernanceBypass: false,
+		RetainUntilDate:  &t,
+		Mode:             &m,
+	}
+	err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts)
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	objectsCh := make(chan minio.ObjectInfo)
+	// Send object names that are needed to be removed to objectsCh
+	go func() {
+		defer close(objectsCh)
+		// List all objects from a bucket-name with a matching prefix.
+		for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) {
+			if object.Err != nil {
+				log.Fatalln(object.Err)
+			}
+			objectsCh <- object
+		}
+	}()
+
+	for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) {
+		// Error is expected here because Retention is set on the object
+		// and RemoveObjects is called without Bypass Governance
+		if rErr.Err == nil {
+			logError(testName, function, args, startTime, "", "Expected error during deletion", nil)
+			return
+		}
+	}
+
+	objectsCh1 := make(chan minio.ObjectInfo)
+
+	// Send object names that are needed to be removed to objectsCh
+	go func() {
+		defer close(objectsCh1)
+		// List all objects from a bucket-name with a matching prefix.
+ for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) { + if object.Err != nil { + log.Fatalln(object.Err) + } + objectsCh1 <- object + } + }() + + opts1 := minio.RemoveObjectsOptions{ + GovernanceBypass: true, + } + + for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh1, opts1) { + // Error is not expected here because Retention is set on the object + // and RemoveObjects is called with Bypass Governance + logError(testName, function, args, startTime, "", "Error detected during deletion", rErr.Err) + return + } + + // Delete all objects and buckets + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Convert string to bool and always return false if any error +func mustParseBool(str string) bool { + b, err := strconv.ParseBool(str) + if err != nil { + return false + } + return b +} + +func main() { + // Output to stdout instead of the default stderr + log.SetOutput(os.Stdout) + // create custom formatter + mintFormatter := mintJSONFormatter{} + // set custom formatter + log.SetFormatter(&mintFormatter) + // log Info or above -- success cases are Info level, failures are Fatal level + log.SetLevel(log.InfoLevel) + + tls := mustParseBool(os.Getenv(enableHTTPS)) + kms := mustParseBool(os.Getenv(enableKMS)) + if os.Getenv(enableKMS) == "" { + // Default to KMS tests. + kms = true + } + // execute tests + if isFullMode() { + testMakeBucketErrorV2() + testGetObjectClosedTwiceV2() + testFPutObjectV2() + testMakeBucketRegionsV2() + testGetObjectReadSeekFunctionalV2() + testGetObjectReadAtFunctionalV2() + testGetObjectRanges() + testCopyObjectV2() + testFunctionalV2() + testComposeObjectErrorCasesV2() + testCompose10KSourcesV2() + testUserMetadataCopyingV2() + testPutObject0ByteV2() + testPutObjectNoLengthV2() + testPutObjectsUnknownV2() + testGetObjectContextV2() + testFPutObjectContextV2() + testFGetObjectContextV2() + testPutObjectContextV2() + testMakeBucketError() + testMakeBucketRegions() + testPutObjectWithMetadata() + testPutObjectReadAt() + testPutObjectStreaming() + testGetObjectSeekEnd() + testGetObjectClosedTwice() + testRemoveMultipleObjects() + testFPutObjectMultipart() + testFPutObject() + testGetObjectReadSeekFunctional() + testGetObjectReadAtFunctional() + testGetObjectReadAtWhenEOFWasReached() + testPresignedPostPolicy() + testCopyObject() + testComposeObjectErrorCases() + testCompose10KSources() + testUserMetadataCopying() + testBucketNotification() + testFunctional() + testGetObjectModified() + testPutObjectUploadSeekedObject() + testGetObjectContext() + testFPutObjectContext() + testFGetObjectContext() + testGetObjectACLContext() + testPutObjectContext() + testStorageClassMetadataPutObject() + testStorageClassInvalidMetadataPutObject() + testStorageClassMetadataCopyObject() + testPutObjectWithContentLanguage() + testListObjects() + testRemoveObjects() + testListObjectVersions() + testStatObjectWithVersioning() + testGetObjectWithVersioning() + testCopyObjectWithVersioning() + testComposeObjectWithVersioning() + testRemoveObjectWithVersioning() + testRemoveObjectsWithVersioning() + testObjectTaggingWithVersioning() + + // SSE-C tests will only work over TLS connection. 
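+		// SSE-C sends the customer-provided encryption key in request headers,
+		// so S3-compatible servers only accept it over TLS; a key would be
+		// wired up roughly as (illustrative, from the encrypt package):
+		//
+		//	key := make([]byte, 32)          // 256-bit customer key
+		//	sse, err := encrypt.NewSSEC(key) // only usable when Secure is true
+		//
+		// which is why the whole block below is gated on the TLS flag.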
+ if tls { + testSSECEncryptionPutGet() + testSSECEncryptionFPut() + testSSECEncryptedGetObjectReadAtFunctional() + testSSECEncryptedGetObjectReadSeekFunctional() + testEncryptedCopyObjectV2() + testEncryptedSSECToSSECCopyObject() + testEncryptedSSECToUnencryptedCopyObject() + testUnencryptedToSSECCopyObject() + testUnencryptedToUnencryptedCopyObject() + testEncryptedEmptyObject() + testDecryptedCopyObject() + testSSECEncryptedToSSECCopyObjectPart() + testSSECMultipartEncryptedToSSECCopyObjectPart() + testSSECEncryptedToUnencryptedCopyPart() + testUnencryptedToSSECCopyObjectPart() + testUnencryptedToUnencryptedCopyPart() + testEncryptedSSECToSSES3CopyObject() + testEncryptedSSES3ToSSECCopyObject() + testSSECEncryptedToSSES3CopyObjectPart() + testSSES3EncryptedToSSECCopyObjectPart() + } + + // KMS tests + if kms { + testSSES3EncryptionPutGet() + testSSES3EncryptionFPut() + testSSES3EncryptedGetObjectReadAtFunctional() + testSSES3EncryptedGetObjectReadSeekFunctional() + testEncryptedSSES3ToSSES3CopyObject() + testEncryptedSSES3ToUnencryptedCopyObject() + testUnencryptedToSSES3CopyObject() + testUnencryptedToSSES3CopyObjectPart() + testSSES3EncryptedToUnencryptedCopyPart() + testSSES3EncryptedToSSES3CopyObjectPart() + } + } else { + testFunctional() + testFunctionalV2() + } +} diff --git a/vendor/github.com/minio/minio-go/v7/go.mod b/vendor/github.com/minio/minio-go/v7/go.mod new file mode 100644 index 00000000..5468818c --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/go.mod @@ -0,0 +1,27 @@ +module github.com/minio/minio-go/v7 + +go 1.14 + +require ( + github.com/dustin/go-humanize v1.0.0 + github.com/google/uuid v1.1.1 + github.com/json-iterator/go v1.1.10 + github.com/klauspost/cpuid v1.3.1 // indirect + github.com/kr/pretty v0.1.0 // indirect + github.com/minio/md5-simd v1.1.0 + github.com/minio/sha256-simd v0.1.1 + github.com/mitchellh/go-homedir v1.1.0 + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/rs/xid v1.2.1 + github.com/sirupsen/logrus v1.8.1 + github.com/smartystreets/goconvey v1.6.4 // indirect + github.com/stretchr/testify v1.4.0 // indirect + golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899 + golang.org/x/net v0.0.0-20200707034311-ab3426394381 + golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae // indirect + golang.org/x/text v0.3.3 // indirect + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect + gopkg.in/ini.v1 v1.57.0 + gopkg.in/yaml.v2 v2.2.8 // indirect +) diff --git a/vendor/github.com/minio/minio-go/v7/go.sum b/vendor/github.com/minio/minio-go/v7/go.sum new file mode 100644 index 00000000..1ba98c4f --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/go.sum @@ -0,0 +1,76 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 
h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= +github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= +github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= +github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= +github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899 h1:DZhuSZLsGlFL4CmhA8BcRA0mnthyA/nZ00AqCUo7vHg= +golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= +gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/minio/minio-go/v7/hook-reader.go b/vendor/github.com/minio/minio-go/v7/hook-reader.go new file mode 100644 index 00000000..f251c1e9 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/hook-reader.go @@ -0,0 +1,85 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+    "fmt"
+    "io"
+)
+
+// hookReader hooks an additional reader into the source stream. It is
+// useful for implementing progress bars. The second reader is notified
+// about the exact number of bytes read from the primary source on each
+// Read operation.
+type hookReader struct {
+    source io.Reader
+    hook   io.Reader
+}
+
+// Seek implements io.Seeker. It seeks the source first, and additionally
+// seeks the hook if the hook also implements io.Seeker.
+func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
+    // If the source implements io.Seeker, use it.
+    sourceSeeker, ok := hr.source.(io.Seeker)
+    if ok {
+        n, err = sourceSeeker.Seek(offset, whence)
+        if err != nil {
+            return 0, err
+        }
+    }
+
+    // If the hook implements io.Seeker, use it as well.
+    hookSeeker, ok := hr.hook.(io.Seeker)
+    if ok {
+        var m int64
+        m, err = hookSeeker.Seek(offset, whence)
+        if err != nil {
+            return 0, err
+        }
+        if n != m {
+            return 0, fmt.Errorf("hook seeker seeked %d bytes, expected source %d bytes", m, n)
+        }
+    }
+    return n, nil
+}
+
+// Read implements io.Reader. It always reads from the source; the 'n'
+// bytes read are then reported through the hook. Returns an error for
+// all non-io.EOF conditions.
+func (hr *hookReader) Read(b []byte) (n int, err error) {
+    n, err = hr.source.Read(b)
+    if err != nil && err != io.EOF {
+        return n, err
+    }
+    // Progress the hook with the total bytes read from the source.
+    if _, herr := hr.hook.Read(b[:n]); herr != nil {
+        if herr != io.EOF {
+            return n, herr
+        }
+    }
+    return n, err
+}
+
+// newHook returns an io.Reader which wraps the source in a hookReader
+// that reports the data read from the source to the hook.
+func newHook(source, hook io.Reader) io.Reader {
+    if hook == nil {
+        return source
+    }
+    return &hookReader{source, hook}
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
new file mode 100644
index 00000000..3b1b547b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
@@ -0,0 +1,214 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+    "encoding/hex"
+    "encoding/xml"
+    "errors"
+    "io"
+    "io/ioutil"
+    "net/http"
+    "net/url"
+    "strconv"
+    "strings"
+    "time"
+
+    "github.com/minio/minio-go/v7/pkg/signer"
+    sha256 "github.com/minio/sha256-simd"
+)
+
+// AssumeRoleResponse contains the result of a successful AssumeRole request.
+type AssumeRoleResponse struct {
+    XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleResponse" json:"-"`
+
+    Result           AssumeRoleResult `xml:"AssumeRoleResult"`
+    ResponseMetadata struct {
+        RequestID string `xml:"RequestId,omitempty"`
+    } `xml:"ResponseMetadata,omitempty"`
+}
+
+// AssumeRoleResult - Contains the response to a successful AssumeRole
+// request, including temporary credentials that can be used to make
+// MinIO API requests.
+type AssumeRoleResult struct {
+    // The identifiers for the temporary security credentials that the operation
+    // returns.
+    AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
+
+    // The temporary security credentials, which include an access key ID, a secret
+    // access key, and a security (or session) token.
+    //
+    // Note: The size of the security token that STS APIs return is not fixed. We
+    // strongly recommend that you make no assumptions about the maximum size. As
+    // of this writing, the typical size is less than 4096 bytes, but that can vary.
+    // Also, future updates to AWS might require larger sizes.
+    Credentials struct {
+        AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty"`
+        SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty"`
+        Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty"`
+        SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty"`
+    } `xml:",omitempty"`
+
+    // A percentage value that indicates the size of the policy in packed form.
+    // The service rejects any policy with a packed size greater than 100 percent,
+    // which means the policy exceeded the allowed space.
+    PackedPolicySize int `xml:",omitempty"`
+}
+
+// An STSAssumeRole retrieves credentials from the MinIO STS service, and
+// keeps track of whether those credentials have expired.
+type STSAssumeRole struct {
+    Expiry
+
+    // Required http Client to use when connecting to the MinIO STS service.
+    Client *http.Client
+
+    // STS endpoint to fetch STS credentials.
+    STSEndpoint string
+
+    // Various options for this request.
+    Options STSAssumeRoleOptions
+}
+
+// STSAssumeRoleOptions is a collection of input options
+// used to obtain AssumeRole credentials.
+type STSAssumeRoleOptions struct {
+    // Mandatory inputs.
+    AccessKey string
+    SecretKey string
+
+    Location        string // Optional, commonly needed with AWS STS.
+    DurationSeconds int    // Optional, defaults to 1 hour.
+
+    // Optional, only valid when used with AWS STS.
+    RoleARN         string
+    RoleSessionName string
+}
+
+// NewSTSAssumeRole returns a pointer to a new
+// Credentials object wrapping the STSAssumeRole.
+func NewSTSAssumeRole(stsEndpoint string, opts STSAssumeRoleOptions) (*Credentials, error) {
+    if stsEndpoint == "" {
+        return nil, errors.New("STS endpoint cannot be empty")
+    }
+    if opts.AccessKey == "" || opts.SecretKey == "" {
+        return nil, errors.New("AssumeRole credentials access/secretkey is mandatory")
+    }
+    return New(&STSAssumeRole{
+        Client: &http.Client{
+            Transport: http.DefaultTransport,
+        },
+        STSEndpoint: stsEndpoint,
+        Options:     opts,
+    }), nil
+}
+
+const defaultDurationSeconds = 3600
+
+// closeResponse closes a non-nil response and drains any remaining data
+// from its Body. It is a convenient wrapper that allows the Go HTTP
+// RoundTripper to re-use the same connection for future requests.
+func closeResponse(resp *http.Response) {
+    // Callers should close resp.Body when done reading from it.
+    // If resp.Body is not closed, the Client's underlying RoundTripper
+    // (typically Transport) may not be able to re-use a persistent TCP
+    // connection to the server for a subsequent "keep-alive" request.
+    if resp != nil && resp.Body != nil {
+        // Drain any remaining Body and then close the connection.
+        // Without draining first, closing the connection would prevent
+        // re-using it for future requests.
+        //  - http://stackoverflow.com/a/17961593/4465767
+        io.Copy(ioutil.Discard, resp.Body)
+        resp.Body.Close()
+    }
+}
+
+func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssumeRoleOptions) (AssumeRoleResponse, error) {
+    v := url.Values{}
+    v.Set("Action", "AssumeRole")
+    v.Set("Version", STSVersion)
+    if opts.RoleARN != "" {
+        v.Set("RoleArn", opts.RoleARN)
+    }
+    if opts.RoleSessionName != "" {
+        v.Set("RoleSessionName", opts.RoleSessionName)
+    }
+    if opts.DurationSeconds > defaultDurationSeconds {
+        v.Set("DurationSeconds", strconv.Itoa(opts.DurationSeconds))
+    } else {
+        v.Set("DurationSeconds", strconv.Itoa(defaultDurationSeconds))
+    }
+
+    u, err := url.Parse(endpoint)
+    if err != nil {
+        return AssumeRoleResponse{}, err
+    }
+    u.Path = "/"
+
+    postBody := strings.NewReader(v.Encode())
+    hash := sha256.New()
+    if _, err = io.Copy(hash, postBody); err != nil {
+        return AssumeRoleResponse{}, err
+    }
+    postBody.Seek(0, 0)
+
+    req, err := http.NewRequest(http.MethodPost, u.String(), postBody)
+    if err != nil {
+        return AssumeRoleResponse{}, err
+    }
+    req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+    req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(hash.Sum(nil)))
+    req = signer.SignV4STS(*req, opts.AccessKey, opts.SecretKey, opts.Location)
+
+    resp, err := clnt.Do(req)
+    if err != nil {
+        return AssumeRoleResponse{}, err
+    }
+    defer closeResponse(resp)
+    if resp.StatusCode != http.StatusOK {
+        return AssumeRoleResponse{}, errors.New(resp.Status)
+    }
+
+    a := AssumeRoleResponse{}
+    if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil {
+        return AssumeRoleResponse{}, err
+    }
+    return a, nil
+}
+
+// Retrieve retrieves credentials from the MinIO service.
+// An error will be returned if the request fails.
+func (m *STSAssumeRole) Retrieve() (Value, error) {
+    a, err := getAssumeRoleCredentials(m.Client, m.STSEndpoint, m.Options)
+    if err != nil {
+        return Value{}, err
+    }
+
+    // Expiry window is set to 10 secs.
+    m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
+
+    return Value{
+        AccessKeyID:     a.Result.Credentials.AccessKey,
+        SecretAccessKey: a.Result.Credentials.SecretKey,
+        SessionToken:    a.Result.Credentials.SessionToken,
+        SignerType:      SignatureV4,
+    }, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go
new file mode 100644
index 00000000..6dc8e9d0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go
@@ -0,0 +1,89 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+// A Chain will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The Chain provides a way of chaining multiple providers together
+// which will pick the first available using the priority order of the
+// Providers in the list.
+//
+// If none of the Providers retrieve a valid credentials Value, Chain's
+// Retrieve() will return the anonymous (no credentials) value.
+//
+// If a Provider is found which returns a valid credentials Value, Chain
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again after IsExpired() returns true.
+//
+// creds := credentials.NewChainCredentials(
+//     []credentials.Provider{
+//         &credentials.EnvAWS{},
+//         &credentials.EnvMinio{},
+//     })
+//
+// // Usage of ChainCredentials.
+// mc, err := minio.New(endpoint, &minio.Options{
+//     Creds:  creds,
+//     Secure: secure,
+//     Region: "us-east-1",
+// })
+// if err != nil {
+//     log.Fatalln(err)
+// }
+//
+type Chain struct {
+    Providers []Provider
+    curr      Provider
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+    return New(&Chain{
+        Providers: append([]Provider{}, providers...),
+    })
+}
+
+// Retrieve returns the credentials value, or the anonymous (no credentials)
+// value if no credentials provider returned any value.
+//
+// If a provider is found with credentials, it will be cached and any calls
+// to IsExpired() will return the expired state of the cached provider.
+func (c *Chain) Retrieve() (Value, error) {
+    for _, p := range c.Providers {
+        creds, _ := p.Retrieve()
+        // Always prioritize non-anonymous providers, if any.
+        if creds.AccessKeyID == "" && creds.SecretAccessKey == "" {
+            continue
+        }
+        c.curr = p
+        return creds, nil
+    }
+    // At this point we have exhausted all the providers and
+    // are left without any credentials; return anonymous.
+    return Value{
+        SignerType: SignatureAnonymous,
+    }, nil
+}
+
+// IsExpired will return the expired state of the currently cached provider,
+// if there is one. If there is no current provider, true will be returned.
+func (c *Chain) IsExpired() bool {
+    if c.curr != nil {
+        return c.curr.IsExpired()
+    }
+
+    return true
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample
new file mode 100644
index 00000000..d793c9e0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample
@@ -0,0 +1,17 @@
+{
+    "version": "8",
+    "hosts": {
+        "play": {
+            "url": "https://play.min.io",
+            "accessKey": "Q3AM3UQ867SPQQA43P2F",
+            "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
+            "api": "S3v2"
+        },
+        "s3": {
+            "url": "https://s3.amazonaws.com",
+            "accessKey": "accessKey",
+            "secretKey": "secret",
+            "api": "S3v4"
+        }
+    }
+}
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
new file mode 100644
index 00000000..62d1701e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
@@ -0,0 +1,182 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+    "sync"
+    "time"
+)
+
+// STSVersion is the STS API version string.
+const STSVersion = "2011-06-15"
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+    // AWS Access key ID
+    AccessKeyID string
+
+    // AWS Secret Access Key
+    SecretAccessKey string
+
+    // AWS Session Token
+    SessionToken string
+
+    // Signature Type.
+    SignerType SignatureType
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state and to define
+// what being expired means.
+type Provider interface {
+    // Retrieve returns a nil error if it successfully retrieved the value.
+    // An error is returned if the value was not obtainable, or empty.
+    Retrieve() (Value, error)
+
+    // IsExpired returns true if the credentials are no longer valid and
+    // need to be retrieved.
+    IsExpired() bool
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+//     type IAMCredentialProvider struct {
+//         Expiry
+//         ...
+//     }
+type Expiry struct {
+    // The date/time at which the credentials expire.
+    expiration time.Time
+
+    // If set will be used by IsExpired to determine the current time.
+    // Defaults to time.Now if CurrentTime is not set.
+    CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration that IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+    e.expiration = expiration
+    if window > 0 {
+        e.expiration = e.expiration.Add(-window)
+    }
+}
+
+// IsExpired returns true if the credentials have expired.
+func (e *Expiry) IsExpired() bool {
+    if e.CurrentTime == nil {
+        e.CurrentTime = time.Now
+    }
+    return e.expiration.Before(e.CurrentTime())
+}
+
+// Credentials - A container for concurrency-safe retrieval of the credentials
+// Value. Credentials will cache the credentials value until they expire. Once
+// the value expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronization state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
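+//
+// A minimal usage sketch (illustrative; EnvAWS is one of the providers
+// defined in this package, and any Provider implementation is used the
+// same way):
+//
+//     creds := New(&EnvAWS{})
+//     v, err := creds.Get()
+//     if err != nil {
+//         // handle error
+//     }
+//     // v.AccessKeyID, v.SecretAccessKey and v.SessionToken are now usable.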
+type Credentials struct { + sync.Mutex + + creds Value + forceRefresh bool + provider Provider +} + +// New returns a pointer to a new Credentials with the provider set. +func New(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + if c == nil { + return Value{}, nil + } + + c.Lock() + defer c.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.Lock() + defer c.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be refreshed. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.Lock() + defer c.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample new file mode 100644 index 00000000..7fc91d9d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go new file mode 100644 index 00000000..0c94477b --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go @@ -0,0 +1,62 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package credentials provides credential retrieval and management +// for S3 compatible object storage. 
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true, at which
+// point Credentials will call the Provider's Retrieve() to get a new
+// credentials Value.
+//
+// The Provider is responsible for determining when credentials have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+//     creds := NewEnvAWS()
+//     // Retrieve the credentials value
+//     credValue, err := creds.Get()
+//     if err != nil {
+//         // handle error
+//     }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+//     creds := NewIAM("")
+//     creds.Expire()
+//     credsValue, err := creds.Get()
+//     // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer set up with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// New function.
+//
+//     type MyProvider struct{}
+//     func (m *MyProvider) Retrieve() (Value, error) {...}
+//     func (m *MyProvider) IsExpired() bool {...}
+//
+//     creds := New(&MyProvider{})
+//     credValue, err := creds.Get()
+//
+package credentials
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
new file mode 100644
index 00000000..b6e60d0e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
@@ -0,0 +1,71 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// An EnvAWS retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY.
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY.
+// * Secret Token: AWS_SESSION_TOKEN.
+type EnvAWS struct {
+    retrieved bool
+}
+
+// NewEnvAWS returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvAWS() *Credentials {
+    return New(&EnvAWS{})
+}
+
+// Retrieve retrieves the keys from the environment.
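+// With AWS_ACCESS_KEY_ID (or AWS_ACCESS_KEY) and AWS_SECRET_ACCESS_KEY
+// (or AWS_SECRET_KEY) both set, the returned Value carries SignatureV4;
+// if either is missing, the Value is anonymous.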
+func (e *EnvAWS) Retrieve() (Value, error) {
+    e.retrieved = false
+
+    id := os.Getenv("AWS_ACCESS_KEY_ID")
+    if id == "" {
+        id = os.Getenv("AWS_ACCESS_KEY")
+    }
+
+    secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+    if secret == "" {
+        secret = os.Getenv("AWS_SECRET_KEY")
+    }
+
+    signerType := SignatureV4
+    if id == "" || secret == "" {
+        signerType = SignatureAnonymous
+    }
+
+    e.retrieved = true
+    return Value{
+        AccessKeyID:     id,
+        SecretAccessKey: secret,
+        SessionToken:    os.Getenv("AWS_SESSION_TOKEN"),
+        SignerType:      signerType,
+    }, nil
+}
+
+// IsExpired returns true if the credentials have not yet been retrieved.
+func (e *EnvAWS) IsExpired() bool {
+    return !e.retrieved
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
new file mode 100644
index 00000000..5bfeab14
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
@@ -0,0 +1,68 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// An EnvMinio retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: MINIO_ACCESS_KEY.
+// * Secret Access Key: MINIO_SECRET_KEY.
+// * Access Key ID: MINIO_ROOT_USER.
+// * Secret Access Key: MINIO_ROOT_PASSWORD.
+type EnvMinio struct {
+    retrieved bool
+}
+
+// NewEnvMinio returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvMinio() *Credentials {
+    return New(&EnvMinio{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvMinio) Retrieve() (Value, error) {
+    e.retrieved = false
+
+    id := os.Getenv("MINIO_ROOT_USER")
+    secret := os.Getenv("MINIO_ROOT_PASSWORD")
+
+    signerType := SignatureV4
+    if id == "" || secret == "" {
+        id = os.Getenv("MINIO_ACCESS_KEY")
+        secret = os.Getenv("MINIO_SECRET_KEY")
+        if id == "" || secret == "" {
+            signerType = SignatureAnonymous
+        }
+    }
+
+    e.retrieved = true
+    return Value{
+        AccessKeyID:     id,
+        SecretAccessKey: secret,
+        SignerType:      signerType,
+    }, nil
+}
+
+// IsExpired returns true if the credentials have not yet been retrieved.
+func (e *EnvMinio) IsExpired() bool {
+    return !e.retrieved
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go
new file mode 100644
index 00000000..ccc8251f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go
@@ -0,0 +1,120 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+    "os"
+    "path/filepath"
+
+    homedir "github.com/mitchellh/go-homedir"
+    ini "gopkg.in/ini.v1"
+)
+
+// A FileAWSCredentials retrieves credentials from the current user's home
+// directory, and keeps track of whether those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
type FileAWSCredentials struct {
+    // Path to the shared credentials file.
+    //
+    // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
+    // env value is empty will default to current user's home directory.
+    // Linux/OSX: "$HOME/.aws/credentials"
+    // Windows:   "%USERPROFILE%\.aws\credentials"
+    Filename string
+
+    // AWS Profile to extract credentials from the shared credentials file. If empty
+    // will default to environment variable "AWS_PROFILE" or "default" if
+    // environment variable is also not set.
+    Profile string
+
+    // retrieved states if the credentials have been successfully retrieved.
+    retrieved bool
+}
+
+// NewFileAWSCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewFileAWSCredentials(filename string, profile string) *Credentials {
+    return New(&FileAWSCredentials{
+        Filename: filename,
+        Profile:  profile,
+    })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *FileAWSCredentials) Retrieve() (Value, error) {
+    if p.Filename == "" {
+        p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
+        if p.Filename == "" {
+            homeDir, err := homedir.Dir()
+            if err != nil {
+                return Value{}, err
+            }
+            p.Filename = filepath.Join(homeDir, ".aws", "credentials")
+        }
+    }
+    if p.Profile == "" {
+        p.Profile = os.Getenv("AWS_PROFILE")
+        if p.Profile == "" {
+            p.Profile = "default"
+        }
+    }
+
+    p.retrieved = false
+
+    iniProfile, err := loadProfile(p.Filename, p.Profile)
+    if err != nil {
+        return Value{}, err
+    }
+
+    // Default to empty string if not found.
+    id := iniProfile.Key("aws_access_key_id")
+    // Default to empty string if not found.
+    secret := iniProfile.Key("aws_secret_access_key")
+    // Default to empty string if not found.
+    token := iniProfile.Key("aws_session_token")
+
+    p.retrieved = true
+    return Value{
+        AccessKeyID:     id.String(),
+        SecretAccessKey: secret.String(),
+        SessionToken:    token.String(),
+        SignerType:      SignatureV4,
+    }, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *FileAWSCredentials) IsExpired() bool {
+    return !p.retrieved
+}
+
+// loadProfile loads the given profile from the file pointed to by the shared
+// credentials filename. The credentials retrieved from the profile will be
+// returned, or an error if it fails to read from the file or the data is
+// invalid.
+func loadProfile(filename, profile string) (*ini.Section, error) {
+    config, err := ini.Load(filename)
+    if err != nil {
+        return nil, err
+    }
+    iniProfile, err := config.GetSection(profile)
+    if err != nil {
+        return nil, err
+    }
+    return iniProfile, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
new file mode 100644
index 00000000..ca6db005
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
@@ -0,0 +1,135 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+    "io/ioutil"
+    "os"
+    "path/filepath"
+    "runtime"
+
+    jsoniter "github.com/json-iterator/go"
+    homedir "github.com/mitchellh/go-homedir"
+)
+
+// A FileMinioClient retrieves credentials from the current user's home
+// directory, and keeps track of whether those credentials are expired.
+//
+// Configuration file example: $HOME/.mc/config.json
+type FileMinioClient struct {
+    // Path to the shared credentials file.
+    //
+    // If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the
+    // env value is empty will default to current user's home directory.
+    // Linux/OSX: "$HOME/.mc/config.json"
+    // Windows:   "%USERPROFILE%\mc\config.json"
+    Filename string
+
+    // MinIO Alias to extract credentials from the shared credentials file. If empty
+    // will default to environment variable "MINIO_ALIAS" or "s3" if the
+    // environment variable is also not set.
+    Alias string
+
+    // retrieved states if the credentials have been successfully retrieved.
+    retrieved bool
+}
+
+// NewFileMinioClient returns a pointer to a new Credentials object
+// wrapping the Alias file provider.
+func NewFileMinioClient(filename string, alias string) *Credentials {
+    return New(&FileMinioClient{
+        Filename: filename,
+        Alias:    alias,
+    })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *FileMinioClient) Retrieve() (Value, error) {
+    if p.Filename == "" {
+        if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok {
+            p.Filename = value
+        } else {
+            homeDir, err := homedir.Dir()
+            if err != nil {
+                return Value{}, err
+            }
+            p.Filename = filepath.Join(homeDir, ".mc", "config.json")
+            if runtime.GOOS == "windows" {
+                p.Filename = filepath.Join(homeDir, "mc", "config.json")
+            }
+        }
+    }
+
+    if p.Alias == "" {
+        p.Alias = os.Getenv("MINIO_ALIAS")
+        if p.Alias == "" {
+            p.Alias = "s3"
+        }
+    }
+
+    p.retrieved = false
+
+    hostCfg, err := loadAlias(p.Filename, p.Alias)
+    if err != nil {
+        return Value{}, err
+    }
+
+    p.retrieved = true
+    return Value{
+        AccessKeyID:     hostCfg.AccessKey,
+        SecretAccessKey: hostCfg.SecretKey,
+        SignerType:      parseSignatureType(hostCfg.API),
+    }, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *FileMinioClient) IsExpired() bool {
+    return !p.retrieved
+}
+
+// hostConfig configuration of a host.
+type hostConfig struct {
+    URL       string `json:"url"`
+    AccessKey string `json:"accessKey"`
+    SecretKey string `json:"secretKey"`
+    API       string `json:"api"`
+}
+
+// config represents the shape of the config file: its version and the
+// configured host aliases.
+type config struct {
+    Version string                `json:"version"`
+    Hosts   map[string]hostConfig `json:"hosts"`
+}
+
+// loadAlias loads the given alias from the file pointed to by the shared
+// credentials filename. The credentials retrieved from the alias will be
+// returned, or an error if it fails to read from the file.
+func loadAlias(filename, alias string) (hostConfig, error) {
+    cfg := &config{}
+    var json = jsoniter.ConfigCompatibleWithStandardLibrary
+
+    configBytes, err := ioutil.ReadFile(filename)
+    if err != nil {
+        return hostConfig{}, err
+    }
+    if err = json.Unmarshal(configBytes, cfg); err != nil {
+        return hostConfig{}, err
+    }
+    return cfg.Hosts[alias], nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
new file mode 100644
index 00000000..b532bcb6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
@@ -0,0 +1,367 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+    "bufio"
+    "errors"
+    "fmt"
+    "io/ioutil"
+    "net"
+    "net/http"
+    "net/url"
+    "os"
+    "path"
+    "strings"
+    "time"
+
+    jsoniter "github.com/json-iterator/go"
+)
+
+// DefaultExpiryWindow - Default expiry window.
+// ExpiryWindow will allow the credentials to trigger refreshing
+// prior to the credentials actually expiring. This is beneficial
+// so that race conditions with expiring credentials do not cause
+// requests to fail unexpectedly due to ExpiredTokenException exceptions.
+const DefaultExpiryWindow = time.Second * 10 // 10 secs
+
+// An IAM retrieves credentials from the EC2 service, and keeps track
+// of whether those credentials have expired.
+type IAM struct {
+    Expiry
+
+    // Required http Client to use when connecting to the IAM metadata service.
+    Client *http.Client
+
+    // Custom endpoint to fetch IAM role credentials.
+    Endpoint string
+}
+
+// IAM Roles for Amazon EC2
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+const (
+    defaultIAMRoleEndpoint      = "http://169.254.169.254"
+    defaultECSRoleEndpoint      = "http://169.254.170.2"
+    defaultSTSRoleEndpoint      = "https://sts.amazonaws.com"
+    defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/"
+    tokenRequestTTLHeader       = "X-aws-ec2-metadata-token-ttl-seconds"
+    tokenPath                   = "/latest/api/token"
+    tokenTTL                    = "21600"
+    tokenRequestHeader          = "X-aws-ec2-metadata-token"
+)
+
+// NewIAM returns a pointer to a new Credentials object wrapping the IAM.
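+//
+// A short usage sketch; this assumes the process runs on EC2/ECS (or with
+// the web-identity environment variables set) so that one of the endpoints
+// above is reachable. An empty endpoint selects the defaults:
+//
+//     creds := NewIAM("")
+//     v, err := creds.Get()
+//     if err != nil {
+//         // handle error, e.g. no IAM role attached
+//     }
+//     _ = v.SessionToken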
+func NewIAM(endpoint string) *Credentials {
+    return New(&IAM{
+        Client: &http.Client{
+            Transport: http.DefaultTransport,
+        },
+        Endpoint: endpoint,
+    })
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// An error will be returned if the request fails, or if it is
+// unable to extract the desired credentials.
+func (m *IAM) Retrieve() (Value, error) {
+    token := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN")
+    var roleCreds ec2RoleCredRespBody
+    var err error
+
+    endpoint := m.Endpoint
+    switch {
+    case len(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")) > 0:
+        if len(endpoint) == 0 {
+            if len(os.Getenv("AWS_REGION")) > 0 {
+                if strings.HasPrefix(os.Getenv("AWS_REGION"), "cn-") {
+                    endpoint = "https://sts." + os.Getenv("AWS_REGION") + ".amazonaws.com.cn"
+                } else {
+                    endpoint = "https://sts." + os.Getenv("AWS_REGION") + ".amazonaws.com"
+                }
+            } else {
+                endpoint = defaultSTSRoleEndpoint
+            }
+        }
+
+        creds := &STSWebIdentity{
+            Client:      m.Client,
+            STSEndpoint: endpoint,
+            GetWebIDTokenExpiry: func() (*WebIdentityToken, error) {
+                token, err := ioutil.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"))
+                if err != nil {
+                    return nil, err
+                }
+
+                return &WebIdentityToken{Token: string(token)}, nil
+            },
+            roleARN:         os.Getenv("AWS_ROLE_ARN"),
+            roleSessionName: os.Getenv("AWS_ROLE_SESSION_NAME"),
+        }
+
+        stsWebIdentityCreds, err := creds.Retrieve()
+        if err == nil {
+            m.SetExpiration(creds.Expiration(), DefaultExpiryWindow)
+        }
+        return stsWebIdentityCreds, err
+
+    case len(os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")) > 0:
+        if len(endpoint) == 0 {
+            endpoint = fmt.Sprintf("%s%s", defaultECSRoleEndpoint,
+                os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"))
+        }
+
+        roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
+
+    case len(os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")) > 0:
+        if len(endpoint) == 0 {
+            endpoint = os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")
+            var ok bool
+            if ok, err = isLoopback(endpoint); !ok {
+                if err == nil {
+                    err = fmt.Errorf("uri host is not a loopback address: %s", endpoint)
+                }
+                break
+            }
+        }
+
+        roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
+
+    default:
+        roleCreds, err = getCredentials(m.Client, endpoint)
+    }
+
+    if err != nil {
+        return Value{}, err
+    }
+    // Expiry window is set to 10 secs.
+    m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow)
+
+    return Value{
+        AccessKeyID:     roleCreds.AccessKeyID,
+        SecretAccessKey: roleCreds.SecretAccessKey,
+        SessionToken:    roleCreds.Token,
+        SignerType:      SignatureV4,
+    }, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+    // Success State
+    Expiration      time.Time
+    AccessKeyID     string
+    SecretAccessKey string
+    Token           string
+
+    // Error state
+    Code    string
+    Message string
+
+    // Unused params.
+    LastUpdated time.Time
+    Type        string
+}
+
+// getIAMRoleURL builds the final IAM role URL where the request will
+// be sent to fetch the rolling access credentials.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+func getIAMRoleURL(endpoint string) (*url.URL, error) {
+    if endpoint == "" {
+        endpoint = defaultIAMRoleEndpoint
+    }
+
+    u, err := url.Parse(endpoint)
+    if err != nil {
+        return nil, err
+    }
+    u.Path = defaultIAMSecurityCredsPath
+    return u, nil
+}
+
+// listRoleNames lists the credential role names associated
+// with the current EC2 service. An error is returned if there
+// are no credentials, or if making or receiving the request fails.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html +func listRoleNames(client *http.Client, u *url.URL, token string) ([]string, error) { + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + if token != "" { + req.Header.Add(tokenRequestHeader, token) + } + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, errors.New(resp.Status) + } + + credsList := []string{} + s := bufio.NewScanner(resp.Body) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, err + } + + return credsList, nil +} + +func getEcsTaskCredentials(client *http.Client, endpoint string, token string) (ec2RoleCredRespBody, error) { + req, err := http.NewRequest(http.MethodGet, endpoint, nil) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + if token != "" { + req.Header.Set("Authorization", token) + } + + resp, err := client.Do(req) + if err != nil { + return ec2RoleCredRespBody{}, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return ec2RoleCredRespBody{}, errors.New(resp.Status) + } + + respCreds := ec2RoleCredRespBody{} + if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, err + } + + return respCreds, nil +} + +func fetchIMDSToken(client *http.Client, endpoint string) (string, error) { + req, err := http.NewRequest(http.MethodPut, endpoint+tokenPath, nil) + if err != nil { + return "", err + } + req.Header.Add(tokenRequestTTLHeader, tokenTTL) + resp, err := client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + if resp.StatusCode != http.StatusOK { + return "", errors.New(resp.Status) + } + return string(data), nil +} + +// getCredentials - obtains the credentials from the IAM role name associated with +// the current EC2 service. +// +// If the credentials cannot be found, or there is an error +// reading the response an error will be returned. +func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { + // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html + token, _ := fetchIMDSToken(client, endpoint) + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + u, err := getIAMRoleURL(endpoint) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + roleNames, err := listRoleNames(client, u, token) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + if len(roleNames) == 0 { + return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service") + } + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + // - An instance profile can contain only one IAM role. This limit cannot be increased. + roleName := roleNames[0] + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + // The following command retrieves the security credentials for an + // IAM role named `s3access`. 
+    //
+    //     $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access
+    //
+    u.Path = path.Join(u.Path, roleName)
+    req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+    if err != nil {
+        return ec2RoleCredRespBody{}, err
+    }
+    if token != "" {
+        req.Header.Add(tokenRequestHeader, token)
+    }
+
+    resp, err := client.Do(req)
+    if err != nil {
+        return ec2RoleCredRespBody{}, err
+    }
+    defer resp.Body.Close()
+    if resp.StatusCode != http.StatusOK {
+        return ec2RoleCredRespBody{}, errors.New(resp.Status)
+    }
+
+    respCreds := ec2RoleCredRespBody{}
+    if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
+        return ec2RoleCredRespBody{}, err
+    }
+
+    if respCreds.Code != "Success" {
+        // If an error code was returned, something failed requesting the role.
+        return ec2RoleCredRespBody{}, errors.New(respCreds.Message)
+    }
+
+    return respCreds, nil
+}
+
+// isLoopback identifies if a URI's host resolves to a loopback address.
+func isLoopback(uri string) (bool, error) {
+    u, err := url.Parse(uri)
+    if err != nil {
+        return false, err
+    }
+
+    host := u.Hostname()
+    if len(host) == 0 {
+        return false, fmt.Errorf("can't parse host from uri: %s", uri)
+    }
+
+    ips, err := net.LookupHost(host)
+    if err != nil {
+        return false, err
+    }
+    for _, ip := range ips {
+        if !net.ParseIP(ip).IsLoopback() {
+            return false, nil
+        }
+    }
+
+    return true, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature-type.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature-type.go
new file mode 100644
index 00000000..b7943330
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature-type.go
@@ -0,0 +1,77 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "strings"
+
+// SignatureType is the type of Authorization requested for a given HTTP request.
+type SignatureType int
+
+// Different types of supported signatures - default is SignatureV4 or SignatureDefault.
+const (
+    // SignatureDefault is always set to v4.
+    SignatureDefault SignatureType = iota
+    SignatureV4
+    SignatureV2
+    SignatureV4Streaming
+    SignatureAnonymous // Anonymous signature signifies no signature.
+)
+
+// IsV2 - is signature SignatureV2?
+func (s SignatureType) IsV2() bool {
+    return s == SignatureV2
+}
+
+// IsV4 - is signature SignatureV4?
+func (s SignatureType) IsV4() bool {
+    return s == SignatureV4 || s == SignatureDefault
+}
+
+// IsStreamingV4 - is signature SignatureV4Streaming?
+func (s SignatureType) IsStreamingV4() bool {
+    return s == SignatureV4Streaming
+}
+
+// IsAnonymous - is signature empty?
+func (s SignatureType) IsAnonymous() bool {
+    return s == SignatureAnonymous
+}
+
+// String returns a humanized version of the signature type; the strings
+// returned here are matched case-insensitively by parseSignatureType.
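+//
+// For example (illustrative round trip):
+//
+//     parseSignatureType("s3v4").String() // "S3v4"
+//     parseSignatureType("bogus")         // SignatureAnonymous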
+func (s SignatureType) String() string {
+    if s.IsV2() {
+        return "S3v2"
+    } else if s.IsV4() {
+        return "S3v4"
+    } else if s.IsStreamingV4() {
+        return "S3v4Streaming"
+    }
+    return "Anonymous"
+}
+
+func parseSignatureType(str string) SignatureType {
+    if strings.EqualFold(str, "S3v4") {
+        return SignatureV4
+    } else if strings.EqualFold(str, "S3v2") {
+        return SignatureV2
+    } else if strings.EqualFold(str, "S3v4Streaming") {
+        return SignatureV4Streaming
+    }
+    return SignatureAnonymous
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go
new file mode 100644
index 00000000..7dde00b0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go
@@ -0,0 +1,67 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+// A Static is a set of credentials which are set programmatically,
+// and will never expire.
+type Static struct {
+    Value
+}
+
+// NewStaticV2 returns a pointer to a new Credentials object
+// wrapping a static credentials value provider, with the signature
+// set to v2. If access and secret are not specified then, regardless
+// of the signature type set, Value will return as anonymous.
+func NewStaticV2(id, secret, token string) *Credentials {
+    return NewStatic(id, secret, token, SignatureV2)
+}
+
+// NewStaticV4 is similar to NewStaticV2, with the signature set to v4.
+func NewStaticV4(id, secret, token string) *Credentials {
+    return NewStatic(id, secret, token, SignatureV4)
+}
+
+// NewStatic returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStatic(id, secret, token string, signerType SignatureType) *Credentials {
+    return New(&Static{
+        Value: Value{
+            AccessKeyID:     id,
+            SecretAccessKey: secret,
+            SessionToken:    token,
+            SignerType:      signerType,
+        },
+    })
+}
+
+// Retrieve returns the static credentials.
+func (s *Static) Retrieve() (Value, error) {
+    if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+        // Anonymous is not an error
+        return Value{SignerType: SignatureAnonymous}, nil
+    }
+    return s.Value, nil
+}
+
+// IsExpired returns true if the credentials have expired.
+//
+// For Static, the credentials never expire.
+func (s *Static) IsExpired() bool {
+    return false
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
new file mode 100644
index 00000000..b79f920f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
@@ -0,0 +1,162 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2019 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+    "encoding/xml"
+    "errors"
+    "fmt"
+    "net/http"
+    "net/url"
+    "time"
+)
+
+// AssumedRoleUser - The identifiers for the temporary security credentials that
+// the operation returns. Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser
+type AssumedRoleUser struct {
+    Arn           string
+    AssumedRoleID string `xml:"AssumeRoleId"`
+}
+
+// AssumeRoleWithClientGrantsResponse contains the result of a successful AssumeRoleWithClientGrants request.
+type AssumeRoleWithClientGrantsResponse struct {
+    XMLName          xml.Name           `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithClientGrantsResponse" json:"-"`
+    Result           ClientGrantsResult `xml:"AssumeRoleWithClientGrantsResult"`
+    ResponseMetadata struct {
+        RequestID string `xml:"RequestId,omitempty"`
+    } `xml:"ResponseMetadata,omitempty"`
+}
+
+// ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants
+// request, including temporary credentials that can be used to make MinIO API requests.
+type ClientGrantsResult struct {
+    AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
+    Audience        string          `xml:",omitempty"`
+    Credentials     struct {
+        AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty"`
+        SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty"`
+        Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty"`
+        SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty"`
+    } `xml:",omitempty"`
+    PackedPolicySize             int    `xml:",omitempty"`
+    Provider                     string `xml:",omitempty"`
+    SubjectFromClientGrantsToken string `xml:",omitempty"`
+}
+
+// ClientGrantsToken - client grants token with expiry.
+type ClientGrantsToken struct {
+    Token  string
+    Expiry int
+}
+
+// An STSClientGrants retrieves credentials from the MinIO STS service, and
+// keeps track of whether those credentials have expired.
+type STSClientGrants struct {
+    Expiry
+
+    // Required http Client to use when connecting to the MinIO STS service.
+    Client *http.Client
+
+    // MinIO endpoint to fetch STS credentials.
+    STSEndpoint string
+
+    // GetClientGrantsTokenExpiry is a function to retrieve tokens from the
+    // IDP. This function should return two values: the accessToken, which is
+    // a self-contained access token (JWT), and the expiry associated with
+    // this token. This is a customer-provided function and is mandatory.
+    GetClientGrantsTokenExpiry func() (*ClientGrantsToken, error)
+}
+
+// NewSTSClientGrants returns a pointer to a new
+// Credentials object wrapping the STSClientGrants.
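+//
+// A minimal sketch; the endpoint and token callback below are placeholders
+// standing in for a real MinIO deployment and IDP integration:
+//
+//     creds, err := NewSTSClientGrants("https://minio.example.com:9000",
+//         func() (*ClientGrantsToken, error) {
+//             return &ClientGrantsToken{Token: "<JWT-from-IDP>", Expiry: 3600}, nil
+//         })
+//     if err != nil {
+//         // handle error
+//     }
+//     v, err := creds.Get()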
+func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) { + if stsEndpoint == "" { + return nil, errors.New("STS endpoint cannot be empty") + } + if getClientGrantsTokenExpiry == nil { + return nil, errors.New("Client grants access token and expiry retrieval function should be defined") + } + return New(&STSClientGrants{ + Client: &http.Client{ + Transport: http.DefaultTransport, + }, + STSEndpoint: stsEndpoint, + GetClientGrantsTokenExpiry: getClientGrantsTokenExpiry, + }), nil +} + +func getClientGrantsCredentials(clnt *http.Client, endpoint string, + getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (AssumeRoleWithClientGrantsResponse, error) { + + accessToken, err := getClientGrantsTokenExpiry() + if err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + + v := url.Values{} + v.Set("Action", "AssumeRoleWithClientGrants") + v.Set("Token", accessToken.Token) + v.Set("DurationSeconds", fmt.Sprintf("%d", accessToken.Expiry)) + v.Set("Version", STSVersion) + + u, err := url.Parse(endpoint) + if err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + u.RawQuery = v.Encode() + + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + resp, err := clnt.Do(req) + if err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return AssumeRoleWithClientGrantsResponse{}, errors.New(resp.Status) + } + + a := AssumeRoleWithClientGrantsResponse{} + if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + return a, nil +} + +// Retrieve retrieves credentials from the MinIO service. +// Error will be returned if the request fails. +func (m *STSClientGrants) Retrieve() (Value, error) { + a, err := getClientGrantsCredentials(m.Client, m.STSEndpoint, m.GetClientGrantsTokenExpiry) + if err != nil { + return Value{}, err + } + + // Expiry window is set to 10secs. + m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) + + return Value{ + AccessKeyID: a.Result.Credentials.AccessKey, + SecretAccessKey: a.Result.Credentials.SecretKey, + SessionToken: a.Result.Credentials.SessionToken, + SignerType: SignatureV4, + }, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go new file mode 100644 index 00000000..bcb3c36a --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go @@ -0,0 +1,124 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package credentials + +import ( + "encoding/xml" + "errors" + "net/http" + "net/url" + "time" +) + +// AssumeRoleWithLDAPResponse contains the result of successful +// AssumeRoleWithLDAPIdentity request +type AssumeRoleWithLDAPResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithLDAPIdentityResponse" json:"-"` + Result LDAPIdentityResult `xml:"AssumeRoleWithLDAPIdentityResult"` + ResponseMetadata struct { + RequestID string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} + +// LDAPIdentityResult - contains credentials for a successful +// AssumeRoleWithLDAPIdentity request. +type LDAPIdentityResult struct { + Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + } `xml:",omitempty"` + + SubjectFromToken string `xml:",omitempty"` +} + +// LDAPIdentity retrieves credentials from MinIO +type LDAPIdentity struct { + Expiry + + // Required http Client to use when connecting to MinIO STS service. + Client *http.Client + + // Exported STS endpoint to fetch STS credentials. + STSEndpoint string + + // LDAP username/password used to fetch LDAP STS credentials. + LDAPUsername, LDAPPassword string +} + +// NewLDAPIdentity returns new credentials object that uses LDAP +// Identity. +func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string) (*Credentials, error) { + return New(&LDAPIdentity{ + Client: &http.Client{Transport: http.DefaultTransport}, + STSEndpoint: stsEndpoint, + LDAPUsername: ldapUsername, + LDAPPassword: ldapPassword, + }), nil +} + +// Retrieve gets the credential by calling the MinIO STS API for +// LDAP on the configured stsEndpoint. +func (k *LDAPIdentity) Retrieve() (value Value, err error) { + u, kerr := url.Parse(k.STSEndpoint) + if kerr != nil { + err = kerr + return + } + + v := url.Values{} + v.Set("Action", "AssumeRoleWithLDAPIdentity") + v.Set("Version", STSVersion) + v.Set("LDAPUsername", k.LDAPUsername) + v.Set("LDAPPassword", k.LDAPPassword) + + u.RawQuery = v.Encode() + + req, kerr := http.NewRequest(http.MethodPost, u.String(), nil) + if kerr != nil { + err = kerr + return + } + + resp, kerr := k.Client.Do(req) + if kerr != nil { + err = kerr + return + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + err = errors.New(resp.Status) + return + } + + r := AssumeRoleWithLDAPResponse{} + if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil { + return + } + + cr := r.Result.Credentials + k.SetExpiration(cr.Expiration, DefaultExpiryWindow) + return Value{ + AccessKeyID: cr.AccessKey, + SecretAccessKey: cr.SecretKey, + SessionToken: cr.SessionToken, + SignerType: SignatureV4, + }, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go new file mode 100644 index 00000000..161ffd36 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go @@ -0,0 +1,181 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strconv"
+	"time"
+)
+
+// AssumeRoleWithWebIdentityResponse contains the result of a successful AssumeRoleWithWebIdentity request.
+type AssumeRoleWithWebIdentityResponse struct {
+	XMLName          xml.Name          `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithWebIdentityResponse" json:"-"`
+	Result           WebIdentityResult `xml:"AssumeRoleWithWebIdentityResult"`
+	ResponseMetadata struct {
+		RequestID string `xml:"RequestId,omitempty"`
+	} `xml:"ResponseMetadata,omitempty"`
+}
+
+// WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity
+// request, including temporary credentials that can be used to make MinIO API requests.
+type WebIdentityResult struct {
+	AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
+	Audience        string          `xml:",omitempty"`
+	Credentials     struct {
+		AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty"`
+		SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty"`
+		Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty"`
+		SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty"`
+	} `xml:",omitempty"`
+	PackedPolicySize            int    `xml:",omitempty"`
+	Provider                    string `xml:",omitempty"`
+	SubjectFromWebIdentityToken string `xml:",omitempty"`
+}
+
+// WebIdentityToken - web identity token with expiry.
+type WebIdentityToken struct {
+	Token  string
+	Expiry int
+}
+
+// An STSWebIdentity retrieves credentials from the MinIO service and keeps
+// track of whether those credentials have expired.
+type STSWebIdentity struct {
+	Expiry
+
+	// Required http Client to use when connecting to MinIO STS service.
+	Client *http.Client
+
+	// Exported STS endpoint to fetch STS credentials.
+	STSEndpoint string
+
+	// GetWebIDTokenExpiry is a customer-provided function that returns ID
+	// tokens from the IDP. It must return two values: an ID token, which
+	// is a self-contained ID token (JWT), and the expiry associated with
+	// that token. This field is mandatory.
+	GetWebIDTokenExpiry func() (*WebIdentityToken, error)
+
+	// roleARN is the Amazon Resource Name (ARN) of the role that the caller is
+	// assuming.
+	roleARN string
+
+	// roleSessionName is the identifier for the assumed role session.
+	roleSessionName string
+}
+
+// NewSTSWebIdentity returns a pointer to a new
+// Credentials object wrapping the STSWebIdentity.
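+//
+// A minimal usage sketch; the endpoint URL and the token-fetching callback
+// below are illustrative assumptions, not values defined by this package:
+//
+//	creds, err := NewSTSWebIdentity("https://sts.minio.example.com", func() (*WebIdentityToken, error) {
+//		// Obtain an OIDC ID token from your IDP here.
+//		return &WebIdentityToken{Token: "eyJhbGciOi...", Expiry: 3600}, nil
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	// creds is then typically passed to the client, e.g. minio.Options{Creds: creds}.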
+func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) { + if stsEndpoint == "" { + return nil, errors.New("STS endpoint cannot be empty") + } + if getWebIDTokenExpiry == nil { + return nil, errors.New("Web ID token and expiry retrieval function should be defined") + } + return New(&STSWebIdentity{ + Client: &http.Client{ + Transport: http.DefaultTransport, + }, + STSEndpoint: stsEndpoint, + GetWebIDTokenExpiry: getWebIDTokenExpiry, + }), nil +} + +func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string, + getWebIDTokenExpiry func() (*WebIdentityToken, error)) (AssumeRoleWithWebIdentityResponse, error) { + idToken, err := getWebIDTokenExpiry() + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + v := url.Values{} + v.Set("Action", "AssumeRoleWithWebIdentity") + if len(roleARN) > 0 { + v.Set("RoleArn", roleARN) + + if len(roleSessionName) == 0 { + roleSessionName = strconv.FormatInt(time.Now().UnixNano(), 10) + } + v.Set("RoleSessionName", roleSessionName) + } + v.Set("WebIdentityToken", idToken.Token) + if idToken.Expiry > 0 { + v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry)) + } + v.Set("Version", STSVersion) + + u, err := url.Parse(endpoint) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + u.RawQuery = v.Encode() + + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + resp, err := clnt.Do(req) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return AssumeRoleWithWebIdentityResponse{}, errors.New(resp.Status) + } + + a := AssumeRoleWithWebIdentityResponse{} + if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + return a, nil +} + +// Retrieve retrieves credentials from the MinIO service. +// Error will be returned if the request fails. +func (m *STSWebIdentity) Retrieve() (Value, error) { + a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.roleARN, m.roleSessionName, m.GetWebIDTokenExpiry) + if err != nil { + return Value{}, err + } + + // Expiry window is set to 10secs. + m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) + + return Value{ + AccessKeyID: a.Result.Credentials.AccessKey, + SecretAccessKey: a.Result.Credentials.SecretKey, + SessionToken: a.Result.Credentials.SessionToken, + SignerType: SignatureV4, + }, nil +} + +// Expiration returns the expiration time of the credentials +func (m *STSWebIdentity) Expiration() time.Time { + return m.expiration +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go new file mode 100644 index 00000000..ce7d2153 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go @@ -0,0 +1,198 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encrypt + +import ( + "crypto/md5" + "encoding/base64" + "errors" + "net/http" + + jsoniter "github.com/json-iterator/go" + "golang.org/x/crypto/argon2" +) + +const ( + // sseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS. + sseGenericHeader = "X-Amz-Server-Side-Encryption" + + // sseKmsKeyID is the AWS SSE-KMS key id. + sseKmsKeyID = sseGenericHeader + "-Aws-Kms-Key-Id" + // sseEncryptionContext is the AWS SSE-KMS Encryption Context data. + sseEncryptionContext = sseGenericHeader + "-Context" + + // sseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key. + sseCustomerAlgorithm = sseGenericHeader + "-Customer-Algorithm" + // sseCustomerKey is the AWS SSE-C encryption key HTTP header key. + sseCustomerKey = sseGenericHeader + "-Customer-Key" + // sseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key. + sseCustomerKeyMD5 = sseGenericHeader + "-Customer-Key-MD5" + + // sseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API. + sseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" + // sseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API. + sseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" + // sseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API. + sseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5" +) + +// PBKDF creates a SSE-C key from the provided password and salt. +// PBKDF is a password-based key derivation function +// which can be used to derive a high-entropy cryptographic +// key from a low-entropy password and a salt. +type PBKDF func(password, salt []byte) ServerSide + +// DefaultPBKDF is the default PBKDF. It uses Argon2id with the +// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads). +var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide { + sse := ssec{} + copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32)) + return sse +} + +// Type is the server-side-encryption method. It represents one of +// the following encryption methods: +// - SSE-C: server-side-encryption with customer provided keys +// - KMS: server-side-encryption with managed keys +// - S3: server-side-encryption using S3 storage encryption +type Type string + +const ( + // SSEC represents server-side-encryption with customer provided keys + SSEC Type = "SSE-C" + // KMS represents server-side-encryption with managed keys + KMS Type = "KMS" + // S3 represents server-side-encryption using S3 storage encryption + S3 Type = "S3" +) + +// ServerSide is a form of S3 server-side-encryption. +type ServerSide interface { + // Type returns the server-side-encryption method. + Type() Type + + // Marshal adds encryption headers to the provided HTTP headers. + // It marks an HTTP request as server-side-encryption request + // and inserts the required data into the headers. + Marshal(h http.Header) +} + +// NewSSE returns a server-side-encryption using S3 storage encryption. 
+// Using SSE-S3 the server will encrypt the object with server-managed keys.
+func NewSSE() ServerSide { return s3{} }
+
+// NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided key ID and context.
+func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) {
+	if context == nil {
+		return kms{key: keyID, hasContext: false}, nil
+	}
+	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	serializedContext, err := json.Marshal(context)
+	if err != nil {
+		return nil, err
+	}
+	return kms{key: keyID, context: serializedContext, hasContext: true}, nil
+}
+
+// NewSSEC returns a new server-side-encryption using SSE-C and the provided key.
+// The key must be 32 bytes long.
+func NewSSEC(key []byte) (ServerSide, error) {
+	if len(key) != 32 {
+		return nil, errors.New("encrypt: SSE-C key must be 256 bit long")
+	}
+	sse := ssec{}
+	copy(sse[:], key)
+	return sse, nil
+}
+
+// SSE transforms an SSE-C copy encryption into an SSE-C encryption.
+// It is the inverse of SSECopy(...).
+//
+// If the provided sse is not an SSE-C copy encryption, SSE returns
+// sse unmodified.
+func SSE(sse ServerSide) ServerSide {
+	if sse == nil || sse.Type() != SSEC {
+		return sse
+	}
+	if sse, ok := sse.(ssecCopy); ok {
+		return ssec(sse)
+	}
+	return sse
+}
+
+// SSECopy transforms an SSE-C encryption into an SSE-C copy
+// encryption. This is required for SSE-C key rotation or an SSE-C
+// copy where the source and the destination should be encrypted.
+//
+// If the provided sse is not an SSE-C encryption, SSECopy returns
+// sse unmodified.
+func SSECopy(sse ServerSide) ServerSide {
+	if sse == nil || sse.Type() != SSEC {
+		return sse
+	}
+	if sse, ok := sse.(ssec); ok {
+		return ssecCopy(sse)
+	}
+	return sse
+}
+
+type ssec [32]byte
+
+func (s ssec) Type() Type { return SSEC }
+
+func (s ssec) Marshal(h http.Header) {
+	keyMD5 := md5.Sum(s[:])
+	h.Set(sseCustomerAlgorithm, "AES256")
+	h.Set(sseCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
+	h.Set(sseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
+}
+
+type ssecCopy [32]byte
+
+func (s ssecCopy) Type() Type { return SSEC }
+
+func (s ssecCopy) Marshal(h http.Header) {
+	keyMD5 := md5.Sum(s[:])
+	h.Set(sseCopyCustomerAlgorithm, "AES256")
+	h.Set(sseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
+	h.Set(sseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
+}
+
+type s3 struct{}
+
+func (s s3) Type() Type { return S3 }
+
+func (s s3) Marshal(h http.Header) { h.Set(sseGenericHeader, "AES256") }
+
+type kms struct {
+	key        string
+	context    []byte
+	hasContext bool
+}
+
+func (s kms) Type() Type { return KMS }
+
+func (s kms) Marshal(h http.Header) {
+	h.Set(sseGenericHeader, "aws:kms")
+	if s.key != "" {
+		h.Set(sseKmsKeyID, s.key)
+	}
+	if s.hasContext {
+		h.Set(sseEncryptionContext, base64.StdEncoding.EncodeToString(s.context))
+	}
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
new file mode 100644
index 00000000..b6f9601b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
@@ -0,0 +1,303 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package lifecycle contains all the lifecycle related data types and marshallers.
+package lifecycle
+
+import (
+	"encoding/xml"
+	"time"
+)
+
+// AbortIncompleteMultipartUpload structure, not supported yet on MinIO
+type AbortIncompleteMultipartUpload struct {
+	XMLName             xml.Name       `xml:"AbortIncompleteMultipartUpload,omitempty" json:"-"`
+	DaysAfterInitiation ExpirationDays `xml:"DaysAfterInitiation,omitempty" json:"DaysAfterInitiation,omitempty"`
+}
+
+// IsDaysNull returns true if days field is null
+func (n AbortIncompleteMultipartUpload) IsDaysNull() bool {
+	return n.DaysAfterInitiation == ExpirationDays(0)
+}
+
+// MarshalXML marshals only if days after initiation is set to a non-zero value
+func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	if n.IsDaysNull() {
+		return nil
+	}
+	type abortIncompleteMultipartUploadWrapper AbortIncompleteMultipartUpload
+	return e.EncodeElement(abortIncompleteMultipartUploadWrapper(n), start)
+}
+
+// NoncurrentVersionExpiration - Specifies when noncurrent object versions expire.
+// Upon expiration, the server permanently deletes the noncurrent object versions.
+// Set this lifecycle configuration action on a bucket that has versioning enabled
+// (or suspended) to request that the server delete noncurrent object versions at a
+// specific period in the object's lifetime.
+type NoncurrentVersionExpiration struct {
+	XMLName        xml.Name       `xml:"NoncurrentVersionExpiration" json:"-"`
+	NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty"`
+}
+
+// MarshalXML marshals only if noncurrent days is set to a non-zero value
+func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	if n.IsDaysNull() {
+		return nil
+	}
+	type noncurrentVersionExpirationWrapper NoncurrentVersionExpiration
+	return e.EncodeElement(noncurrentVersionExpirationWrapper(n), start)
+}
+
+// IsDaysNull returns true if days field is null
+func (n NoncurrentVersionExpiration) IsDaysNull() bool {
+	return n.NoncurrentDays == ExpirationDays(0)
+}
+
+// NoncurrentVersionTransition structure, set this action to request that the
+// server transition noncurrent object versions to a different storage class
+// at a specific period in the object's lifetime.
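+//
+// An element of this type marshals to XML of the following shape (the values
+// here are illustrative):
+//
+//	<NoncurrentVersionTransition>
+//		<StorageClass>GLACIER</StorageClass>
+//		<NoncurrentDays>30</NoncurrentDays>
+//	</NoncurrentVersionTransition>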
+type NoncurrentVersionTransition struct {
+	XMLName        xml.Name       `xml:"NoncurrentVersionTransition,omitempty" json:"-"`
+	StorageClass   string         `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
+	NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty" json:"NoncurrentDays,omitempty"`
+}
+
+// IsDaysNull returns true if days field is null
+func (n NoncurrentVersionTransition) IsDaysNull() bool {
+	return n.NoncurrentDays == ExpirationDays(0)
+}
+
+// IsStorageClassEmpty returns true if storage class field is empty
+func (n NoncurrentVersionTransition) IsStorageClassEmpty() bool {
+	return n.StorageClass == ""
+}
+
+// MarshalXML is extended to leave out
+// <NoncurrentVersionTransition></NoncurrentVersionTransition> tags
+func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	if n.IsDaysNull() || n.IsStorageClassEmpty() {
+		return nil
+	}
+	type noncurrentVersionTransitionWrapper NoncurrentVersionTransition
+	return e.EncodeElement(noncurrentVersionTransitionWrapper(n), start)
+}
+
+// Tag structure holds a key/value pair representing an object tag to which a
+// lifecycle configuration applies
+type Tag struct {
+	XMLName xml.Name `xml:"Tag,omitempty" json:"-"`
+	Key     string   `xml:"Key,omitempty" json:"Key,omitempty"`
+	Value   string   `xml:"Value,omitempty" json:"Value,omitempty"`
+}
+
+// IsEmpty returns whether this tag is empty or not.
+func (tag Tag) IsEmpty() bool {
+	return tag.Key == ""
+}
+
+// Transition structure - transition details of lifecycle configuration
+type Transition struct {
+	XMLName      xml.Name       `xml:"Transition" json:"-"`
+	Date         ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"`
+	StorageClass string         `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
+	Days         ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"`
+}
+
+// IsDaysNull returns true if days field is null
+func (t Transition) IsDaysNull() bool {
+	return t.Days == ExpirationDays(0)
+}
+
+// IsDateNull returns true if date field is null
+func (t Transition) IsDateNull() bool {
+	return t.Date.Time.IsZero()
+}
+
+// IsNull returns true if both date and days fields are null
+func (t Transition) IsNull() bool {
+	return t.IsDaysNull() && t.IsDateNull()
+}
+
+// MarshalXML marshals Transition only if it is non-null
+func (t Transition) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error {
+	if t.IsNull() {
+		return nil
+	}
+	type transitionWrapper Transition
+	return en.EncodeElement(transitionWrapper(t), startElement)
+}
+
+// And - the And rule for LifecycleTag, to be used in LifecycleRuleFilter
+type And struct {
+	XMLName xml.Name `xml:"And" json:"-"`
+	Prefix  string   `xml:"Prefix" json:"Prefix,omitempty"`
+	Tags    []Tag    `xml:"Tag" json:"Tags,omitempty"`
+}
+
+// IsEmpty returns true if Tags field is null
+func (a And) IsEmpty() bool {
+	return len(a.Tags) == 0 && a.Prefix == ""
+}
+
+// Filter will be used in selecting rule(s) for lifecycle configuration
+type Filter struct {
+	XMLName xml.Name `xml:"Filter" json:"-"`
+	And     And      `xml:"And,omitempty" json:"And,omitempty"`
+	Prefix  string   `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
+	Tag     Tag      `xml:"Tag,omitempty" json:"Tag,omitempty"`
+}
+
+// MarshalXML - produces the xml representation of the Filter struct;
+// only one of Prefix, And and Tag should be present in the output.
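+//
+// For example, a Filter with only the Prefix field set marshals to
+// (illustrative value):
+//
+//	<Filter><Prefix>logs/</Prefix></Filter>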
+func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	if err := e.EncodeToken(start); err != nil {
+		return err
+	}
+
+	switch {
+	case !f.And.IsEmpty():
+		if err := e.EncodeElement(f.And, xml.StartElement{Name: xml.Name{Local: "And"}}); err != nil {
+			return err
+		}
+	case !f.Tag.IsEmpty():
+		if err := e.EncodeElement(f.Tag, xml.StartElement{Name: xml.Name{Local: "Tag"}}); err != nil {
+			return err
+		}
+	default:
+		// Always print Prefix field when both And & Tag are empty
+		if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil {
+			return err
+		}
+	}
+
+	return e.EncodeToken(xml.EndElement{Name: start.Name})
+}
+
+// ExpirationDays is an integer type used to marshal and unmarshal Days in Expiration
+type ExpirationDays int
+
+// MarshalXML encodes number of days to expire if it is non-zero and
+// encodes empty string otherwise
+func (eDays ExpirationDays) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
+	if eDays == 0 {
+		return nil
+	}
+	return e.EncodeElement(int(eDays), startElement)
+}
+
+// ExpirationDate is an embedded type containing time.Time to unmarshal
+// Date in Expiration
+type ExpirationDate struct {
+	time.Time
+}
+
+// MarshalXML encodes expiration date if it is non-zero and encodes
+// empty string otherwise
+func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
+	if eDate.Time.IsZero() {
+		return nil
+	}
+	return e.EncodeElement(eDate.Format(time.RFC3339), startElement)
+}
+
+// ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element.
+type ExpireDeleteMarker bool
+
+// MarshalXML encodes the delete marker boolean into an XML form.
+func (b ExpireDeleteMarker) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
+	if !b {
+		return nil
+	}
+	type expireDeleteMarkerWrapper ExpireDeleteMarker
+	return e.EncodeElement(expireDeleteMarkerWrapper(b), startElement)
+}
+
+// IsEnabled returns true if the auto delete-marker expiration is enabled
+func (b ExpireDeleteMarker) IsEnabled() bool {
+	return bool(b)
+}
+
+// Expiration structure - expiration details of lifecycle configuration
+type Expiration struct {
+	XMLName      xml.Name           `xml:"Expiration,omitempty" json:"-"`
+	Date         ExpirationDate     `xml:"Date,omitempty" json:"Date,omitempty"`
+	Days         ExpirationDays     `xml:"Days,omitempty" json:"Days,omitempty"`
+	DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty"`
+}
+
+// IsDaysNull returns true if days field is null
+func (e Expiration) IsDaysNull() bool {
+	return e.Days == ExpirationDays(0)
+}
+
+// IsDateNull returns true if date field is null
+func (e Expiration) IsDateNull() bool {
+	return e.Date.Time.IsZero()
+}
+
+// IsDeleteMarkerExpirationEnabled returns true if the auto-expiration of delete marker is enabled
+func (e Expiration) IsDeleteMarkerExpirationEnabled() bool {
+	return e.DeleteMarker.IsEnabled()
+}
+
+// IsNull returns true if both date and days fields are null
+func (e Expiration) IsNull() bool {
+	return e.IsDaysNull() && e.IsDateNull() && !e.IsDeleteMarkerExpirationEnabled()
+}
+
+// MarshalXML marshals Expiration only if it is non-null
+func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error {
+	if e.IsNull() {
+		return nil
+	}
+	type expirationWrapper Expiration
+	return en.EncodeElement(expirationWrapper(e), startElement)
+}
+
+// Rule represents a single rule in lifecycle configuration
+type Rule struct {
+	XMLName xml.Name `xml:"Rule,omitempty" json:"-"`
+	AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"`
+	Expiration                     Expiration                     `xml:"Expiration,omitempty" json:"Expiration,omitempty"`
+	ID                             string                         `xml:"ID" json:"ID"`
+	RuleFilter                     Filter                         `xml:"Filter,omitempty" json:"Filter,omitempty"`
+	NoncurrentVersionExpiration    NoncurrentVersionExpiration    `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"`
+	NoncurrentVersionTransition    NoncurrentVersionTransition    `xml:"NoncurrentVersionTransition,omitempty" json:"NoncurrentVersionTransition,omitempty"`
+	Prefix                         string                         `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
+	Status                         string                         `xml:"Status" json:"Status"`
+	Transition                     Transition                     `xml:"Transition,omitempty" json:"Transition,omitempty"`
+}
+
+// Configuration is a collection of Rule objects.
+type Configuration struct {
+	XMLName xml.Name `xml:"LifecycleConfiguration,omitempty" json:"-"`
+	Rules   []Rule   `xml:"Rule"`
+}
+
+// Empty checks if the lifecycle configuration is empty
+func (c *Configuration) Empty() bool {
+	if c == nil {
+		return true
+	}
+	return len(c.Rules) == 0
+}
+
+// NewConfiguration initializes a fresh lifecycle configuration
+// for manipulation, such as setting and removing lifecycle rules
+// and filters.
+func NewConfiguration() *Configuration {
+	return &Configuration{}
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go
new file mode 100644
index 00000000..d0a47163
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go
@@ -0,0 +1,78 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package notification
+
+// identity represents the user ID; this is a compliance field.
+type identity struct {
+	PrincipalID string `json:"principalId"`
+}
+
+// event bucket metadata.
+type bucketMeta struct {
+	Name          string   `json:"name"`
+	OwnerIdentity identity `json:"ownerIdentity"`
+	ARN           string   `json:"arn"`
+}
+
+// event object metadata.
+type objectMeta struct {
+	Key          string            `json:"key"`
+	Size         int64             `json:"size,omitempty"`
+	ETag         string            `json:"eTag,omitempty"`
+	ContentType  string            `json:"contentType,omitempty"`
+	UserMetadata map[string]string `json:"userMetadata,omitempty"`
+	VersionID    string            `json:"versionId,omitempty"`
+	Sequencer    string            `json:"sequencer"`
+}
+
+// event server specific metadata.
+type eventMeta struct {
+	SchemaVersion   string     `json:"s3SchemaVersion"`
+	ConfigurationID string     `json:"configurationId"`
+	Bucket          bucketMeta `json:"bucket"`
+	Object          objectMeta `json:"object"`
+}
+
+// sourceInfo represents information on the client that
+// triggered the event notification.
+type sourceInfo struct {
+	Host      string `json:"host"`
+	Port      string `json:"port"`
+	UserAgent string `json:"userAgent"`
+}
+
+// Event represents an Amazon S3 bucket notification event.
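+//
+// An abridged, illustrative example of a single event record as it appears
+// on the wire (the field values here are made up):
+//
+//	{
+//	  "eventVersion": "2.0",
+//	  "eventSource": "minio:s3",
+//	  "eventTime": "2021-10-16T21:11:32Z",
+//	  "eventName": "s3:ObjectCreated:Put",
+//	  "s3": {
+//	    "bucket": {"name": "mybucket"},
+//	    "object": {"key": "photo.jpg", "size": 1024}
+//	  }
+//	}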
+type Event struct {
+	EventVersion      string            `json:"eventVersion"`
+	EventSource       string            `json:"eventSource"`
+	AwsRegion         string            `json:"awsRegion"`
+	EventTime         string            `json:"eventTime"`
+	EventName         string            `json:"eventName"`
+	UserIdentity      identity          `json:"userIdentity"`
+	RequestParameters map[string]string `json:"requestParameters"`
+	ResponseElements  map[string]string `json:"responseElements"`
+	S3                eventMeta         `json:"s3"`
+	Source            sourceInfo        `json:"source"`
+}
+
+// Info - represents the collection of notification events, additionally
+// also reports errors if any while listening on bucket notifications.
+type Info struct {
+	Records []Event
+	Err     error
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
new file mode 100644
index 00000000..b17e6c54
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
@@ -0,0 +1,395 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package notification
+
+import (
+	"encoding/xml"
+	"errors"
+	"fmt"
+
+	"github.com/minio/minio-go/v7/pkg/set"
+)
+
+// EventType is an S3 notification event associated with the bucket notification configuration
+type EventType string
+
+// The role of all event types is described in:
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
+const (
+	ObjectCreatedAll                     EventType = "s3:ObjectCreated:*"
+	ObjectCreatedPut                               = "s3:ObjectCreated:Put"
+	ObjectCreatedPost                              = "s3:ObjectCreated:Post"
+	ObjectCreatedCopy                              = "s3:ObjectCreated:Copy"
+	ObjectCreatedCompleteMultipartUpload           = "s3:ObjectCreated:CompleteMultipartUpload"
+	ObjectAccessedGet                              = "s3:ObjectAccessed:Get"
+	ObjectAccessedHead                             = "s3:ObjectAccessed:Head"
+	ObjectAccessedAll                              = "s3:ObjectAccessed:*"
+	ObjectRemovedAll                               = "s3:ObjectRemoved:*"
+	ObjectRemovedDelete                            = "s3:ObjectRemoved:Delete"
+	ObjectRemovedDeleteMarkerCreated               = "s3:ObjectRemoved:DeleteMarkerCreated"
+	ObjectReducedRedundancyLostObject              = "s3:ReducedRedundancyLostObject"
+	BucketCreatedAll                               = "s3:BucketCreated:*"
+	BucketRemovedAll                               = "s3:BucketRemoved:*"
+)
+
+// FilterRule - child of S3Key, a tag in the notification xml which
+// carries suffix/prefix filters
+type FilterRule struct {
+	Name  string `xml:"Name"`
+	Value string `xml:"Value"`
+}
+
+// S3Key - child of Filter, a tag in the notification xml which
+// carries suffix/prefix filters
+type S3Key struct {
+	FilterRules []FilterRule `xml:"FilterRule,omitempty"`
+}
+
+// Filter - a tag in the notification xml structure which carries
+// suffix/prefix filters
+type Filter struct {
+	S3Key S3Key `xml:"S3Key,omitempty"`
+}
+
+// Arn - holds ARN information that will be sent to the web service;
+// the ARN description can be found at http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+type Arn struct {
+	Partition string
+	Service
string + Region string + AccountID string + Resource string +} + +// NewArn creates new ARN based on the given partition, service, region, account id and resource +func NewArn(partition, service, region, accountID, resource string) Arn { + return Arn{Partition: partition, + Service: service, + Region: region, + AccountID: accountID, + Resource: resource} +} + +// String returns the string format of the ARN +func (arn Arn) String() string { + return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource +} + +// Config - represents one single notification configuration +// such as topic, queue or lambda configuration. +type Config struct { + ID string `xml:"Id,omitempty"` + Arn Arn `xml:"-"` + Events []EventType `xml:"Event"` + Filter *Filter `xml:"Filter,omitempty"` +} + +// NewConfig creates one notification config and sets the given ARN +func NewConfig(arn Arn) Config { + return Config{Arn: arn, Filter: &Filter{}} +} + +// AddEvents adds one event to the current notification config +func (t *Config) AddEvents(events ...EventType) { + t.Events = append(t.Events, events...) +} + +// AddFilterSuffix sets the suffix configuration to the current notification config +func (t *Config) AddFilterSuffix(suffix string) { + if t.Filter == nil { + t.Filter = &Filter{} + } + newFilterRule := FilterRule{Name: "suffix", Value: suffix} + // Replace any suffix rule if existing and add to the list otherwise + for index := range t.Filter.S3Key.FilterRules { + if t.Filter.S3Key.FilterRules[index].Name == "suffix" { + t.Filter.S3Key.FilterRules[index] = newFilterRule + return + } + } + t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) +} + +// AddFilterPrefix sets the prefix configuration to the current notification config +func (t *Config) AddFilterPrefix(prefix string) { + if t.Filter == nil { + t.Filter = &Filter{} + } + newFilterRule := FilterRule{Name: "prefix", Value: prefix} + // Replace any prefix rule if existing and add to the list otherwise + for index := range t.Filter.S3Key.FilterRules { + if t.Filter.S3Key.FilterRules[index].Name == "prefix" { + t.Filter.S3Key.FilterRules[index] = newFilterRule + return + } + } + t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) +} + +// EqualEventTypeList tells whether a and b contain the same events +func EqualEventTypeList(a, b []EventType) bool { + if len(a) != len(b) { + return false + } + setA := set.NewStringSet() + for _, i := range a { + setA.Add(string(i)) + } + + setB := set.NewStringSet() + for _, i := range b { + setB.Add(string(i)) + } + + return setA.Difference(setB).IsEmpty() +} + +// EqualFilterRuleList tells whether a and b contain the same filters +func EqualFilterRuleList(a, b []FilterRule) bool { + if len(a) != len(b) { + return false + } + + setA := set.NewStringSet() + for _, i := range a { + setA.Add(fmt.Sprintf("%s-%s", i.Name, i.Value)) + } + + setB := set.NewStringSet() + for _, i := range b { + setB.Add(fmt.Sprintf("%s-%s", i.Name, i.Value)) + } + + return setA.Difference(setB).IsEmpty() +} + +// Equal returns whether this `Config` is equal to another defined by the passed parameters +func (t *Config) Equal(events []EventType, prefix, suffix string) bool { + if t == nil { + return false + } + + // Compare events + passEvents := EqualEventTypeList(t.Events, events) + + // Compare filters + var newFilterRules []FilterRule + if prefix != "" { + newFilterRules = append(newFilterRules, FilterRule{Name: "prefix", Value: prefix}) + } + 
if suffix != "" { + newFilterRules = append(newFilterRules, FilterRule{Name: "suffix", Value: suffix}) + } + + var currentFilterRules []FilterRule + if t.Filter != nil { + currentFilterRules = t.Filter.S3Key.FilterRules + } + + passFilters := EqualFilterRuleList(currentFilterRules, newFilterRules) + return passEvents && passFilters +} + +// TopicConfig carries one single topic notification configuration +type TopicConfig struct { + Config + Topic string `xml:"Topic"` +} + +// QueueConfig carries one single queue notification configuration +type QueueConfig struct { + Config + Queue string `xml:"Queue"` +} + +// LambdaConfig carries one single cloudfunction notification configuration +type LambdaConfig struct { + Config + Lambda string `xml:"CloudFunction"` +} + +// Configuration - the struct that represents the whole XML to be sent to the web service +type Configuration struct { + XMLName xml.Name `xml:"NotificationConfiguration"` + LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"` + TopicConfigs []TopicConfig `xml:"TopicConfiguration"` + QueueConfigs []QueueConfig `xml:"QueueConfiguration"` +} + +// AddTopic adds a given topic config to the general bucket notification config +func (b *Configuration) AddTopic(topicConfig Config) bool { + newTopicConfig := TopicConfig{Config: topicConfig, Topic: topicConfig.Arn.String()} + for _, n := range b.TopicConfigs { + // If new config matches existing one + if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter { + + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range topicConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } + } + } + b.TopicConfigs = append(b.TopicConfigs, newTopicConfig) + return true +} + +// AddQueue adds a given queue config to the general bucket notification config +func (b *Configuration) AddQueue(queueConfig Config) bool { + newQueueConfig := QueueConfig{Config: queueConfig, Queue: queueConfig.Arn.String()} + for _, n := range b.QueueConfigs { + if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter { + + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range queueConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } + } + } + b.QueueConfigs = append(b.QueueConfigs, newQueueConfig) + return true +} + +// AddLambda adds a given lambda config to the general bucket notification config +func (b *Configuration) AddLambda(lambdaConfig Config) bool { + newLambdaConfig := LambdaConfig{Config: lambdaConfig, Lambda: lambdaConfig.Arn.String()} + for _, n := range b.LambdaConfigs { + if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter { + + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range lambdaConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } + } + } + b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig) + return true +} + +// RemoveTopicByArn removes all topic configurations that match the exact specified ARN +func (b *Configuration) RemoveTopicByArn(arn Arn) { + var topics []TopicConfig + for _, topic := 
range b.TopicConfigs { + if topic.Topic != arn.String() { + topics = append(topics, topic) + } + } + b.TopicConfigs = topics +} + +// ErrNoConfigMatch is returned when a notification configuration (sqs,sns,lambda) is not found when trying to delete +var ErrNoConfigMatch = errors.New("no notification configuration matched") + +// RemoveTopicByArnEventsPrefixSuffix removes a topic configuration that match the exact specified ARN, events, prefix and suffix +func (b *Configuration) RemoveTopicByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { + removeIndex := -1 + for i, v := range b.TopicConfigs { + // if it matches events and filters, mark the index for deletion + if v.Topic == arn.String() && v.Config.Equal(events, prefix, suffix) { + removeIndex = i + break // since we have at most one matching config + } + } + if removeIndex >= 0 { + b.TopicConfigs = append(b.TopicConfigs[:removeIndex], b.TopicConfigs[removeIndex+1:]...) + return nil + } + return ErrNoConfigMatch +} + +// RemoveQueueByArn removes all queue configurations that match the exact specified ARN +func (b *Configuration) RemoveQueueByArn(arn Arn) { + var queues []QueueConfig + for _, queue := range b.QueueConfigs { + if queue.Queue != arn.String() { + queues = append(queues, queue) + } + } + b.QueueConfigs = queues +} + +// RemoveQueueByArnEventsPrefixSuffix removes a queue configuration that match the exact specified ARN, events, prefix and suffix +func (b *Configuration) RemoveQueueByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { + removeIndex := -1 + for i, v := range b.QueueConfigs { + // if it matches events and filters, mark the index for deletion + if v.Queue == arn.String() && v.Config.Equal(events, prefix, suffix) { + removeIndex = i + break // since we have at most one matching config + } + } + if removeIndex >= 0 { + b.QueueConfigs = append(b.QueueConfigs[:removeIndex], b.QueueConfigs[removeIndex+1:]...) + return nil + } + return ErrNoConfigMatch +} + +// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN +func (b *Configuration) RemoveLambdaByArn(arn Arn) { + var lambdas []LambdaConfig + for _, lambda := range b.LambdaConfigs { + if lambda.Lambda != arn.String() { + lambdas = append(lambdas, lambda) + } + } + b.LambdaConfigs = lambdas +} + +// RemoveLambdaByArnEventsPrefixSuffix removes a topic configuration that match the exact specified ARN, events, prefix and suffix +func (b *Configuration) RemoveLambdaByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { + removeIndex := -1 + for i, v := range b.LambdaConfigs { + // if it matches events and filters, mark the index for deletion + if v.Lambda == arn.String() && v.Config.Equal(events, prefix, suffix) { + removeIndex = i + break // since we have at most one matching config + } + } + if removeIndex >= 0 { + b.LambdaConfigs = append(b.LambdaConfigs[:removeIndex], b.LambdaConfigs[removeIndex+1:]...) + return nil + } + return ErrNoConfigMatch +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go new file mode 100644 index 00000000..beacc71f --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go @@ -0,0 +1,696 @@ +/* + * MinIO Client (C) 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package replication + +import ( + "bytes" + "encoding/xml" + "fmt" + "strconv" + "strings" + "unicode/utf8" + + "github.com/rs/xid" +) + +var errInvalidFilter = fmt.Errorf("invalid filter") + +// OptionType specifies operation to be performed on config +type OptionType string + +const ( + // AddOption specifies addition of rule to config + AddOption OptionType = "Add" + // SetOption specifies modification of existing rule to config + SetOption OptionType = "Set" + + // RemoveOption specifies rule options are for removing a rule + RemoveOption OptionType = "Remove" + // ImportOption is for getting current config + ImportOption OptionType = "Import" +) + +// Options represents options to set a replication configuration rule +type Options struct { + Op OptionType + ID string + Prefix string + RuleStatus string + Priority string + TagString string + StorageClass string + RoleArn string + DestBucket string + IsTagSet bool + IsSCSet bool + ReplicateDeletes string // replicate versioned deletes + ReplicateDeleteMarkers string // replicate soft deletes + ReplicaSync string // replicate replica metadata modifications + ExistingObjectReplicate string +} + +// Tags returns a slice of tags for a rule +func (opts Options) Tags() ([]Tag, error) { + var tagList []Tag + tagTokens := strings.Split(opts.TagString, "&") + for _, tok := range tagTokens { + if tok == "" { + break + } + kv := strings.SplitN(tok, "=", 2) + if len(kv) != 2 { + return []Tag{}, fmt.Errorf("tags should be entered as comma separated k=v pairs") + } + tagList = append(tagList, Tag{ + Key: kv[0], + Value: kv[1], + }) + } + return tagList, nil +} + +// Config - replication configuration specified in +// https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html +type Config struct { + XMLName xml.Name `xml:"ReplicationConfiguration" json:"-"` + Rules []Rule `xml:"Rule" json:"Rules"` + Role string `xml:"Role" json:"Role"` +} + +// Empty returns true if config is not set +func (c *Config) Empty() bool { + return len(c.Rules) == 0 +} + +// AddRule adds a new rule to existing replication config. If a rule exists with the +// same ID, then the rule is replaced. +func (c *Config) AddRule(opts Options) error { + priority, err := strconv.Atoi(opts.Priority) + if err != nil { + return err + } + if opts.RoleArn != c.Role && c.Role != "" { + return fmt.Errorf("role ARN does not match existing configuration") + } + var status Status + // toggle rule status for edit option + switch opts.RuleStatus { + case "enable": + status = Enabled + case "disable": + status = Disabled + default: + return fmt.Errorf("rule state should be either [enable|disable]") + } + + tags, err := opts.Tags() + if err != nil { + return err + } + andVal := And{ + Tags: tags, + } + filter := Filter{Prefix: opts.Prefix} + // only a single tag is set. 
+ if opts.Prefix == "" && len(tags) == 1 { + filter.Tag = tags[0] + } + // both prefix and tag are present + if len(andVal.Tags) > 1 || opts.Prefix != "" { + filter.And = andVal + filter.And.Prefix = opts.Prefix + filter.Prefix = "" + filter.Tag = Tag{} + } + if opts.ID == "" { + opts.ID = xid.New().String() + } + arnStr := opts.RoleArn + if opts.RoleArn == "" { + arnStr = c.Role + } + if arnStr == "" { + return fmt.Errorf("role ARN required") + } + tokens := strings.Split(arnStr, ":") + if len(tokens) != 6 { + return fmt.Errorf("invalid format for replication Arn") + } + if c.Role == "" { + c.Role = arnStr + } + destBucket := opts.DestBucket + // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html + if btokens := strings.Split(destBucket, ":"); len(btokens) != 6 { + if len(btokens) == 1 { + destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket) + } else { + return fmt.Errorf("destination bucket needs to be in Arn format") + } + } + dmStatus := Disabled + if opts.ReplicateDeleteMarkers != "" { + switch opts.ReplicateDeleteMarkers { + case "enable": + dmStatus = Enabled + case "disable": + dmStatus = Disabled + default: + return fmt.Errorf("ReplicateDeleteMarkers should be either enable|disable") + } + } + + vDeleteStatus := Disabled + if opts.ReplicateDeletes != "" { + switch opts.ReplicateDeletes { + case "enable": + vDeleteStatus = Enabled + case "disable": + vDeleteStatus = Disabled + default: + return fmt.Errorf("ReplicateDeletes should be either enable|disable") + } + } + var replicaSync Status + // replica sync is by default Enabled, unless specified. + switch opts.ReplicaSync { + case "enable", "": + replicaSync = Enabled + case "disable": + replicaSync = Disabled + default: + return fmt.Errorf("replica metadata sync should be either [enable|disable]") + } + + var existingStatus Status + if opts.ExistingObjectReplicate != "" { + switch opts.ExistingObjectReplicate { + case "enable": + existingStatus = Enabled + case "disable", "": + existingStatus = Disabled + default: + return fmt.Errorf("existingObjectReplicate should be either enable|disable") + } + } + newRule := Rule{ + ID: opts.ID, + Priority: priority, + Status: status, + Filter: filter, + Destination: Destination{ + Bucket: destBucket, + StorageClass: opts.StorageClass, + }, + DeleteMarkerReplication: DeleteMarkerReplication{Status: dmStatus}, + DeleteReplication: DeleteReplication{Status: vDeleteStatus}, + // MinIO enables replica metadata syncing by default in the case of bi-directional replication to allow + // automatic failover as the expectation in this case is that replica and source should be identical. + // However AWS leaves this configurable https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-for-metadata-changes.html + SourceSelectionCriteria: SourceSelectionCriteria{ + ReplicaModifications: ReplicaModifications{ + Status: replicaSync, + }, + }, + // By default disable existing object replication unless selected + ExistingObjectReplication: ExistingObjectReplication{ + Status: existingStatus, + }, + } + + // validate rule after overlaying priority for pre-existing rule being disabled. + if err := newRule.Validate(); err != nil { + return err + } + for _, rule := range c.Rules { + if rule.Priority == newRule.Priority { + return fmt.Errorf("priority must be unique. 
Replication configuration already has a rule with this priority") + } + if rule.Destination.Bucket != newRule.Destination.Bucket { + return fmt.Errorf("the destination bucket must be same for all rules") + } + if rule.ID == newRule.ID { + return fmt.Errorf("a rule exists with this ID") + } + } + + c.Rules = append(c.Rules, newRule) + return nil +} + +// EditRule modifies an existing rule in replication config +func (c *Config) EditRule(opts Options) error { + if opts.ID == "" { + return fmt.Errorf("rule ID missing") + } + rIdx := -1 + var newRule Rule + for i, rule := range c.Rules { + if rule.ID == opts.ID { + rIdx = i + newRule = rule + break + } + } + if rIdx < 0 { + return fmt.Errorf("rule with ID %s not found in replication configuration", opts.ID) + } + prefixChg := opts.Prefix != newRule.Prefix() + if opts.IsTagSet || prefixChg { + prefix := newRule.Prefix() + if prefix != opts.Prefix { + prefix = opts.Prefix + } + tags := []Tag{newRule.Filter.Tag} + if len(newRule.Filter.And.Tags) != 0 { + tags = newRule.Filter.And.Tags + } + var err error + if opts.IsTagSet { + tags, err = opts.Tags() + if err != nil { + return err + } + } + andVal := And{ + Tags: tags, + } + + filter := Filter{Prefix: prefix} + // only a single tag is set. + if prefix == "" && len(tags) == 1 { + filter.Tag = tags[0] + } + // both prefix and tag are present + if len(andVal.Tags) > 1 || prefix != "" { + filter.And = andVal + filter.And.Prefix = prefix + filter.Prefix = "" + filter.Tag = Tag{} + } + newRule.Filter = filter + } + + // toggle rule status for edit option + if opts.RuleStatus != "" { + switch opts.RuleStatus { + case "enable": + newRule.Status = Enabled + case "disable": + newRule.Status = Disabled + default: + return fmt.Errorf("rule state should be either [enable|disable]") + } + } + // set DeleteMarkerReplication rule status for edit option + if opts.ReplicateDeleteMarkers != "" { + switch opts.ReplicateDeleteMarkers { + case "enable": + newRule.DeleteMarkerReplication.Status = Enabled + case "disable": + newRule.DeleteMarkerReplication.Status = Disabled + default: + return fmt.Errorf("ReplicateDeleteMarkers state should be either [enable|disable]") + } + } + + // set DeleteReplication rule status for edit option. 
This is a MinIO specific
+	// option to replicate versioned deletes
+	if opts.ReplicateDeletes != "" {
+		switch opts.ReplicateDeletes {
+		case "enable":
+			newRule.DeleteReplication.Status = Enabled
+		case "disable":
+			newRule.DeleteReplication.Status = Disabled
+		default:
+			return fmt.Errorf("ReplicateDeletes state should be either [enable|disable]")
+		}
+	}
+
+	if opts.ReplicaSync != "" {
+		switch opts.ReplicaSync {
+		case "enable", "":
+			newRule.SourceSelectionCriteria.ReplicaModifications.Status = Enabled
+		case "disable":
+			newRule.SourceSelectionCriteria.ReplicaModifications.Status = Disabled
+		default:
+			return fmt.Errorf("replica metadata sync should be either [enable|disable]")
+		}
+	}
+
+	if opts.ExistingObjectReplicate != "" {
+		switch opts.ExistingObjectReplicate {
+		case "enable":
+			newRule.ExistingObjectReplication.Status = Enabled
+		case "disable":
+			newRule.ExistingObjectReplication.Status = Disabled
+		default:
+			return fmt.Errorf("ExistingObjectReplication state should be either [enable|disable]")
+		}
+	}
+	if opts.IsSCSet {
+		newRule.Destination.StorageClass = opts.StorageClass
+	}
+	if opts.Priority != "" {
+		priority, err := strconv.Atoi(opts.Priority)
+		if err != nil {
+			return err
+		}
+		newRule.Priority = priority
+	}
+	if opts.DestBucket != "" {
+		destBucket := opts.DestBucket
+		// ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html
+		if btokens := strings.Split(opts.DestBucket, ":"); len(btokens) != 6 {
+			if len(btokens) == 1 {
+				destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket)
+			} else {
+				return fmt.Errorf("destination bucket needs to be in Arn format")
+			}
+		}
+		newRule.Destination.Bucket = destBucket
+	}
+	// validate rule
+	if err := newRule.Validate(); err != nil {
+		return err
+	}
+	// ensure priority and destination bucket restrictions are not violated
+	for idx, rule := range c.Rules {
+		if rule.Priority == newRule.Priority && rIdx != idx {
+			return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority")
+		}
+		if rule.Destination.Bucket != newRule.Destination.Bucket {
+			return fmt.Errorf("the destination bucket must be same for all rules")
+		}
+	}
+
+	c.Rules[rIdx] = newRule
+	return nil
+}
+
+// RemoveRule removes a rule from replication config.
+func (c *Config) RemoveRule(opts Options) error {
+	var newRules []Rule
+	ruleFound := false
+	for _, rule := range c.Rules {
+		if rule.ID != opts.ID {
+			newRules = append(newRules, rule)
+			continue
+		}
+		ruleFound = true
+	}
+	if !ruleFound {
+		return fmt.Errorf("rule with ID %s not found", opts.ID)
+	}
+	if len(newRules) == 0 {
+		return fmt.Errorf("replication configuration should have at least one rule")
+	}
+	c.Rules = newRules
+	return nil
+}
+
+// Rule - a rule for replication configuration.
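+//
+// A rule of this shape marshals to XML along these lines (the values below
+// are illustrative):
+//
+//	<Rule>
+//		<ID>rule-1</ID>
+//		<Status>Enabled</Status>
+//		<Priority>1</Priority>
+//		<Filter><Prefix>docs/</Prefix></Filter>
+//		<Destination><Bucket>arn:aws:s3:::targetbucket</Bucket></Destination>
+//	</Rule>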
+type Rule struct {
+	XMLName                   xml.Name                  `xml:"Rule" json:"-"`
+	ID                        string                    `xml:"ID,omitempty"`
+	Status                    Status                    `xml:"Status"`
+	Priority                  int                       `xml:"Priority"`
+	DeleteMarkerReplication   DeleteMarkerReplication   `xml:"DeleteMarkerReplication"`
+	DeleteReplication         DeleteReplication         `xml:"DeleteReplication"`
+	Destination               Destination               `xml:"Destination"`
+	Filter                    Filter                    `xml:"Filter" json:"Filter"`
+	SourceSelectionCriteria   SourceSelectionCriteria   `xml:"SourceSelectionCriteria" json:"SourceSelectionCriteria"`
+	ExistingObjectReplication ExistingObjectReplication `xml:"ExistingObjectReplication,omitempty" json:"ExistingObjectReplication,omitempty"`
+}
+
+// Validate validates the rule for correctness
+func (r Rule) Validate() error {
+	if err := r.validateID(); err != nil {
+		return err
+	}
+	if err := r.validateStatus(); err != nil {
+		return err
+	}
+	if err := r.validateFilter(); err != nil {
+		return err
+	}
+
+	if r.Priority < 0 && r.Status == Enabled {
+		return fmt.Errorf("priority must be set for the rule")
+	}
+
+	return r.ExistingObjectReplication.Validate()
+}
+
+// validateID - checks if ID is valid or not.
+func (r Rule) validateID() error {
+	// cannot be longer than 255 characters
+	if len(r.ID) > 255 {
+		return fmt.Errorf("ID must not exceed 255 characters")
+	}
+	return nil
+}
+
+// validateStatus - checks if status is valid or not.
+func (r Rule) validateStatus() error {
+	// Status can't be empty
+	if len(r.Status) == 0 {
+		return fmt.Errorf("status cannot be empty")
+	}
+
+	// Status must be one of Enabled or Disabled
+	if r.Status != Enabled && r.Status != Disabled {
+		return fmt.Errorf("status must be set to either Enabled or Disabled")
+	}
+	return nil
+}
+
+func (r Rule) validateFilter() error {
+	return r.Filter.Validate()
+}
+
+// Prefix - a rule can either have a prefix directly under <Filter> or under
+// <Filter><And>. This method returns the prefix from the
+// location where it is available
+func (r Rule) Prefix() string {
+	if r.Filter.Prefix != "" {
+		return r.Filter.Prefix
+	}
+	return r.Filter.And.Prefix
+}
+
+// Tags - a rule can either have a tag directly under <Filter> or under
+// <Filter><And>. This method returns all the tags from the
+// rule in the format tag1=value1&tag2=value2
+func (r Rule) Tags() string {
+	ts := []Tag{r.Filter.Tag}
+	if len(r.Filter.And.Tags) != 0 {
+		ts = r.Filter.And.Tags
+	}
+
+	var buf bytes.Buffer
+	for _, t := range ts {
+		if buf.Len() > 0 {
+			buf.WriteString("&")
+		}
+		buf.WriteString(t.String())
+	}
+	return buf.String()
+}
+
+// Filter - a filter for a replication configuration Rule.
+type Filter struct {
+	XMLName xml.Name `xml:"Filter" json:"-"`
+	Prefix  string   `json:"Prefix,omitempty"`
+	And     And      `xml:"And,omitempty" json:"And,omitempty"`
+	Tag     Tag      `xml:"Tag,omitempty" json:"Tag,omitempty"`
+}
+
+// Validate - validates the filter element
+func (f Filter) Validate() error {
+	// A Filter must have exactly one of Prefix, Tag, or And specified.
+	if !f.And.isEmpty() {
+		if f.Prefix != "" {
+			return errInvalidFilter
+		}
+		if !f.Tag.IsEmpty() {
+			return errInvalidFilter
+		}
+	}
+	if f.Prefix != "" {
+		if !f.Tag.IsEmpty() {
+			return errInvalidFilter
+		}
+	}
+	if !f.Tag.IsEmpty() {
+		if err := f.Tag.Validate(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Tag - a tag for a replication configuration Rule filter.
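+//
+// For example (illustrative values), Tag{Key: "env", Value: "prod"} renders
+// as "env=prod" via String().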
+type Tag struct { + XMLName xml.Name `json:"-"` + Key string `xml:"Key,omitempty" json:"Key,omitempty"` + Value string `xml:"Value,omitempty" json:"Value,omitempty"` +} + +func (tag Tag) String() string { + if tag.IsEmpty() { + return "" + } + return tag.Key + "=" + tag.Value +} + +// IsEmpty returns whether this tag is empty or not. +func (tag Tag) IsEmpty() bool { + return tag.Key == "" +} + +// Validate checks this tag. +func (tag Tag) Validate() error { + if len(tag.Key) == 0 || utf8.RuneCountInString(tag.Key) > 128 { + return fmt.Errorf("invalid Tag Key") + } + + if utf8.RuneCountInString(tag.Value) > 256 { + return fmt.Errorf("invalid Tag Value") + } + return nil +} + +// Destination - destination in ReplicationConfiguration. +type Destination struct { + XMLName xml.Name `xml:"Destination" json:"-"` + Bucket string `xml:"Bucket" json:"Bucket"` + StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` +} + +// And - a tag to combine a prefix and multiple tags for replication configuration rule. +type And struct { + XMLName xml.Name `xml:"And,omitempty" json:"-"` + Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` + Tags []Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` +} + +// isEmpty returns true if Tags field is null +func (a And) isEmpty() bool { + return len(a.Tags) == 0 && a.Prefix == "" +} + +// Status represents Enabled/Disabled status +type Status string + +// Supported status types +const ( + Enabled Status = "Enabled" + Disabled Status = "Disabled" +) + +// DeleteMarkerReplication - whether delete markers are replicated - https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html +type DeleteMarkerReplication struct { + Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default +} + +// IsEmpty returns true if DeleteMarkerReplication is not set +func (d DeleteMarkerReplication) IsEmpty() bool { + return len(d.Status) == 0 +} + +// DeleteReplication - whether versioned deletes are replicated - this +// is a MinIO specific extension +type DeleteReplication struct { + Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default +} + +// IsEmpty returns true if DeleteReplication is not set +func (d DeleteReplication) IsEmpty() bool { + return len(d.Status) == 0 +} + +// ReplicaModifications specifies if replica modification sync is enabled +type ReplicaModifications struct { + Status Status `xml:"Status" json:"Status"` // should be set to "Enabled" by default +} + +// SourceSelectionCriteria - specifies additional source selection criteria in ReplicationConfiguration. +type SourceSelectionCriteria struct { + ReplicaModifications ReplicaModifications `xml:"ReplicaModifications" json:"ReplicaModifications"` +} + +// IsValid - checks whether SourceSelectionCriteria is valid or not. 
+func (s SourceSelectionCriteria) IsValid() bool {
+	return s.ReplicaModifications.Status == Enabled || s.ReplicaModifications.Status == Disabled
+}
+
+// Validate source selection criteria
+func (s SourceSelectionCriteria) Validate() error {
+	if (s == SourceSelectionCriteria{}) {
+		return nil
+	}
+	if !s.IsValid() {
+		return fmt.Errorf("invalid ReplicaModification status")
+	}
+	return nil
+}
+
+// ExistingObjectReplication - whether existing object replication is enabled
+type ExistingObjectReplication struct {
+	Status Status `xml:"Status"` // should be set to "Disabled" by default
+}
+
+// IsEmpty returns true if ExistingObjectReplication is not set
+func (e ExistingObjectReplication) IsEmpty() bool {
+	return len(e.Status) == 0
+}
+
+// Validate checks that the status, when set, is either Enabled or Disabled.
+func (e ExistingObjectReplication) Validate() error {
+	if e.IsEmpty() {
+		return nil
+	}
+	if e.Status != Disabled && e.Status != Enabled {
+		return fmt.Errorf("invalid ExistingObjectReplication status")
+	}
+	return nil
+}
+
+// Metrics represents inline replication metrics
+// such as pending, failed and completed bytes in total for a bucket
+type Metrics struct {
+	// Pending size in bytes
+	PendingSize uint64 `json:"pendingReplicationSize"`
+	// Completed size in bytes
+	ReplicatedSize uint64 `json:"completedReplicationSize"`
+	// Total Replica size in bytes
+	ReplicaSize uint64 `json:"replicaSize"`
+	// Failed size in bytes
+	FailedSize uint64 `json:"failedReplicationSize"`
+	// Total number of pending operations including metadata updates
+	PendingCount uint64 `json:"pendingReplicationCount"`
+	// Total number of failed operations including metadata updates
+	FailedCount uint64 `json:"failedReplicationCount"`
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
new file mode 100644
index 00000000..fea25d6e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
@@ -0,0 +1,391 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3utils
+
+import (
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"net"
+	"net/url"
+	"regexp"
+	"sort"
+	"strings"
+	"unicode/utf8"
+)
+
+// Sentinel URL is the default url value which is invalid.
+var sentinelURL = url.URL{}
+
+// IsValidDomain validates if input string is a valid domain name.
+func IsValidDomain(host string) bool {
+	// See RFC 1035, RFC 3696.
+	host = strings.TrimSpace(host)
+	if len(host) == 0 || len(host) > 255 {
+		return false
+	}
+	// host cannot start or end with "-"
+	if host[len(host)-1:] == "-" || host[:1] == "-" {
+		return false
+	}
+	// host cannot start or end with "_"
+	if host[len(host)-1:] == "_" || host[:1] == "_" {
+		return false
+	}
+	// host cannot start with a "."
+	if host[:1] == "." {
+		return false
+	}
+	// All non alphanumeric characters are invalid.
+	if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
+		return false
+	}
+	// No need to regexp match, since the list is non-exhaustive.
+	// We let it valid and fail later.
+	return true
+}
+
+// IsValidIP parses input string for ip address validity.
+func IsValidIP(ip string) bool {
+	return net.ParseIP(ip) != nil
+}
+
+// IsVirtualHostSupported - verifies if bucketName can be part of
+// virtual host. Currently only Amazon S3 and Google Cloud Storage
+// would support this.
+func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
+	if endpointURL == sentinelURL {
+		return false
+	}
+	// bucketName can be valid but '.' in the hostname will fail SSL
+	// certificate validation. So do not use host-style for such buckets.
+	if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
+		return false
+	}
+	// Return true for all other cases
+	return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL)
+}
+
+// Refer for region styles - https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+
+// amazonS3HostHyphen - regular expression used to determine if an arg is s3 host in hyphenated style.
+var amazonS3HostHyphen = regexp.MustCompile(`^s3-(.*?).amazonaws.com$`)
+
+// amazonS3HostDualStack - regular expression used to determine if an arg is s3 host dualstack.
+var amazonS3HostDualStack = regexp.MustCompile(`^s3.dualstack.(.*?).amazonaws.com$`)
+
+// amazonS3HostDot - regular expression used to determine if an arg is s3 host in . style.
+var amazonS3HostDot = regexp.MustCompile(`^s3.(.*?).amazonaws.com$`)
+
+// amazonS3ChinaHost - regular expression used to determine if the arg is s3 china host.
+var amazonS3ChinaHost = regexp.MustCompile(`^s3.(cn.*?).amazonaws.com.cn$`)
+
+// amazonS3ChinaHostDualStack - regular expression used to determine if the arg is s3 china host dualstack.
+var amazonS3ChinaHostDualStack = regexp.MustCompile(`^s3.dualstack.(cn.*?).amazonaws.com.cn$`)
+
+// GetRegionFromURL - returns a region from url host.
+func GetRegionFromURL(endpointURL url.URL) string {
+	if endpointURL == sentinelURL {
+		return ""
+	}
+	if endpointURL.Host == "s3-external-1.amazonaws.com" {
+		return ""
+	}
+	if IsAmazonGovCloudEndpoint(endpointURL) {
+		return "us-gov-west-1"
+	}
+	parts := amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
+	if len(parts) > 1 {
+		return parts[1]
+	}
+	parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host)
+	if len(parts) > 1 {
+		return parts[1]
+	}
+	parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host)
+	if len(parts) > 1 {
+		return parts[1]
+	}
+	parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Host)
+	if len(parts) > 1 {
+		return parts[1]
+	}
+	parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host)
+	if len(parts) > 1 {
+		return parts[1]
+	}
+	return ""
+}
+
+// IsAliyunOSSEndpoint - Match if it is exactly Aliyun OSS endpoint.
+func IsAliyunOSSEndpoint(endpointURL url.URL) bool {
+	return strings.HasSuffix(endpointURL.Host, "aliyuncs.com")
+}
+
+// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
+func IsAmazonEndpoint(endpointURL url.URL) bool {
+	if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" {
+		return true
+	}
+	return GetRegionFromURL(endpointURL) != ""
+}
+
+// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
+func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool {
+	if endpointURL == sentinelURL {
+		return false
+	}
+	return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" ||
+		IsAmazonFIPSGovCloudEndpoint(endpointURL))
+}
+
+// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint.
+// See https://aws.amazon.com/compliance/fips.
+func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool {
+	if endpointURL == sentinelURL {
+		return false
+	}
+	return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" ||
+		endpointURL.Host == "s3-fips.dualstack.us-gov-west-1.amazonaws.com"
+}
+
+// IsAmazonFIPSUSEastWestEndpoint - Match if it is exactly Amazon S3 FIPS US East/West endpoint.
+// See https://aws.amazon.com/compliance/fips.
+func IsAmazonFIPSUSEastWestEndpoint(endpointURL url.URL) bool {
+	if endpointURL == sentinelURL {
+		return false
+	}
+	switch endpointURL.Host {
+	case "s3-fips.us-east-2.amazonaws.com":
+	case "s3-fips.dualstack.us-west-1.amazonaws.com":
+	case "s3-fips.dualstack.us-west-2.amazonaws.com":
+	case "s3-fips.dualstack.us-east-2.amazonaws.com":
+	case "s3-fips.dualstack.us-east-1.amazonaws.com":
+	case "s3-fips.us-west-1.amazonaws.com":
+	case "s3-fips.us-west-2.amazonaws.com":
+	case "s3-fips.us-east-1.amazonaws.com":
+	default:
+		return false
+	}
+	return true
+}
+
+// IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint.
+// See https://aws.amazon.com/compliance/fips.
+func IsAmazonFIPSEndpoint(endpointURL url.URL) bool {
+	return IsAmazonFIPSUSEastWestEndpoint(endpointURL) || IsAmazonFIPSGovCloudEndpoint(endpointURL)
+}
+
+// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
+func IsGoogleEndpoint(endpointURL url.URL) bool {
+	if endpointURL == sentinelURL {
+		return false
+	}
+	return endpointURL.Host == "storage.googleapis.com"
+}
+
+// Expects ascii encoded strings - from output of urlEncodePath
+func percentEncodeSlash(s string) string {
+	return strings.Replace(s, "/", "%2F", -1)
+}
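A quick sketch of the endpoint helpers above in use; the dual-stack host is a real AWS naming pattern, used here purely for illustration:

package main

import (
	"fmt"
	"net/url"

	"github.com/minio/minio-go/v7/pkg/s3utils"
)

func main() {
	u, _ := url.Parse("https://s3.dualstack.eu-west-1.amazonaws.com")
	fmt.Println(s3utils.GetRegionFromURL(*u)) // "eu-west-1", via the dualstack pattern
	fmt.Println(s3utils.IsAmazonEndpoint(*u)) // true: a region was extracted
	fmt.Println(s3utils.IsGoogleEndpoint(*u)) // false
}
+
+// QueryEncode - encodes query values in their URL encoded form.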
In +// addition to the percent encoding performed by urlEncodePath() used +// here, it also percent encodes '/' (forward slash) +func QueryEncode(v url.Values) string { + if v == nil { + return "" + } + var buf bytes.Buffer + keys := make([]string, 0, len(v)) + for k := range v { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := v[k] + prefix := percentEncodeSlash(EncodePath(k)) + "=" + for _, v := range vs { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(prefix) + buf.WriteString(percentEncodeSlash(EncodePath(v))) + } + } + return buf.String() +} + +// TagDecode - decodes canonical tag into map of key and value. +func TagDecode(ctag string) map[string]string { + if ctag == "" { + return map[string]string{} + } + tags := strings.Split(ctag, "&") + tagMap := make(map[string]string, len(tags)) + var err error + for _, tag := range tags { + kvs := strings.SplitN(tag, "=", 2) + if len(kvs) == 0 { + return map[string]string{} + } + if len(kvs) == 1 { + return map[string]string{} + } + tagMap[kvs[0]], err = url.PathUnescape(kvs[1]) + if err != nil { + continue + } + } + return tagMap +} + +// TagEncode - encodes tag values in their URL encoded form. In +// addition to the percent encoding performed by urlEncodePath() used +// here, it also percent encodes '/' (forward slash) +func TagEncode(tags map[string]string) string { + if tags == nil { + return "" + } + values := url.Values{} + for k, v := range tags { + values[k] = []string{v} + } + return QueryEncode(values) +} + +// if object matches reserved string, no need to encode them +var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + +// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 +// non english characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character. +func EncodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname strings.Builder + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname.WriteRune(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname.WriteRune(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname.WriteString("%" + strings.ToUpper(hex)) + } + } + } + return encodedPathname.String() +} + +// We support '.' with bucket names but we fallback to using path +// style requests instead for such buckets. +var ( + validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`) + validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) + ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) +) + +// Common checker for both stricter and basic validation. 
+func checkBucketNameCommon(bucketName string, strict bool) (err error) { + if strings.TrimSpace(bucketName) == "" { + return errors.New("Bucket name cannot be empty") + } + if len(bucketName) < 3 { + return errors.New("Bucket name cannot be shorter than 3 characters") + } + if len(bucketName) > 63 { + return errors.New("Bucket name cannot be longer than 63 characters") + } + if ipAddress.MatchString(bucketName) { + return errors.New("Bucket name cannot be an ip address") + } + if strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") { + return errors.New("Bucket name contains invalid characters") + } + if strict { + if !validBucketNameStrict.MatchString(bucketName) { + err = errors.New("Bucket name contains invalid characters") + } + return err + } + if !validBucketName.MatchString(bucketName) { + err = errors.New("Bucket name contains invalid characters") + } + return err +} + +// CheckValidBucketName - checks if we have a valid input bucket name. +func CheckValidBucketName(bucketName string) (err error) { + return checkBucketNameCommon(bucketName, false) +} + +// CheckValidBucketNameStrict - checks if we have a valid input bucket name. +// This is a stricter version. +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html +func CheckValidBucketNameStrict(bucketName string) (err error) { + return checkBucketNameCommon(bucketName, true) +} + +// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix. +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +func CheckValidObjectNamePrefix(objectName string) error { + if len(objectName) > 1024 { + return errors.New("Object name cannot be longer than 1024 characters") + } + if !utf8.ValidString(objectName) { + return errors.New("Object name with non UTF-8 strings are not supported") + } + return nil +} + +// CheckValidObjectName - checks if we have a valid input object name. +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +func CheckValidObjectName(objectName string) error { + if strings.TrimSpace(objectName) == "" { + return errors.New("Object name cannot be empty") + } + return CheckValidObjectNamePrefix(objectName) +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go new file mode 100644 index 00000000..c35e58e1 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go @@ -0,0 +1,200 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package set + +import ( + "fmt" + "sort" + + jsoniter "github.com/json-iterator/go" +) + +// StringSet - uses map as set of strings. +type StringSet map[string]struct{} + +var json = jsoniter.ConfigCompatibleWithStandardLibrary + +// ToSlice - returns StringSet as string slice. 
+func (set StringSet) ToSlice() []string { + keys := make([]string, 0, len(set)) + for k := range set { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// IsEmpty - returns whether the set is empty or not. +func (set StringSet) IsEmpty() bool { + return len(set) == 0 +} + +// Add - adds string to the set. +func (set StringSet) Add(s string) { + set[s] = struct{}{} +} + +// Remove - removes string in the set. It does nothing if string does not exist in the set. +func (set StringSet) Remove(s string) { + delete(set, s) +} + +// Contains - checks if string is in the set. +func (set StringSet) Contains(s string) bool { + _, ok := set[s] + return ok +} + +// FuncMatch - returns new set containing each value who passes match function. +// A 'matchFn' should accept element in a set as first argument and +// 'matchString' as second argument. The function can do any logic to +// compare both the arguments and should return true to accept element in +// a set to include in output set else the element is ignored. +func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet { + nset := NewStringSet() + for k := range set { + if matchFn(k, matchString) { + nset.Add(k) + } + } + return nset +} + +// ApplyFunc - returns new set containing each value processed by 'applyFn'. +// A 'applyFn' should accept element in a set as a argument and return +// a processed string. The function can do any logic to return a processed +// string. +func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet { + nset := NewStringSet() + for k := range set { + nset.Add(applyFn(k)) + } + return nset +} + +// Equals - checks whether given set is equal to current set or not. +func (set StringSet) Equals(sset StringSet) bool { + // If length of set is not equal to length of given set, the + // set is not equal to given set. + if len(set) != len(sset) { + return false + } + + // As both sets are equal in length, check each elements are equal. + for k := range set { + if _, ok := sset[k]; !ok { + return false + } + } + + return true +} + +// Intersection - returns the intersection with given set as new set. +func (set StringSet) Intersection(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + if _, ok := sset[k]; ok { + nset.Add(k) + } + } + + return nset +} + +// Difference - returns the difference with given set as new set. +func (set StringSet) Difference(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + if _, ok := sset[k]; !ok { + nset.Add(k) + } + } + + return nset +} + +// Union - returns the union with given set as new set. +func (set StringSet) Union(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + nset.Add(k) + } + + for k := range sset { + nset.Add(k) + } + + return nset +} + +// MarshalJSON - converts to JSON data. +func (set StringSet) MarshalJSON() ([]byte, error) { + return json.Marshal(set.ToSlice()) +} + +// UnmarshalJSON - parses JSON data and creates new set with it. +// If 'data' contains JSON string array, the set contains each string. +// If 'data' contains JSON string, the set contains the string as one element. +// If 'data' contains Other JSON types, JSON parse error is returned. 
+func (set *StringSet) UnmarshalJSON(data []byte) error { + sl := []string{} + var err error + if err = json.Unmarshal(data, &sl); err == nil { + *set = make(StringSet) + for _, s := range sl { + set.Add(s) + } + } else { + var s string + if err = json.Unmarshal(data, &s); err == nil { + *set = make(StringSet) + set.Add(s) + } + } + + return err +} + +// String - returns printable string of the set. +func (set StringSet) String() string { + return fmt.Sprintf("%s", set.ToSlice()) +} + +// NewStringSet - creates new string set. +func NewStringSet() StringSet { + return make(StringSet) +} + +// CreateStringSet - creates new string set with given string values. +func CreateStringSet(sl ...string) StringSet { + set := make(StringSet) + for _, k := range sl { + set.Add(k) + } + return set +} + +// CopyStringSet - returns copy of given set. +func CopyStringSet(set StringSet) StringSet { + nset := NewStringSet() + for k, v := range set { + nset[k] = v + } + return nset +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go new file mode 100644 index 00000000..7b2ca91d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go @@ -0,0 +1,306 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package signer + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" + "strings" + "time" +) + +// Reference for constants used below - +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming +const ( + streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + payloadChunkSize = 64 * 1024 + chunkSigConstLen = 17 // ";chunk-signature=" + signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" + crlfLen = 2 // CRLF +) + +// Request headers to be ignored while calculating seed signature for +// a request. 
+var ignoredStreamingHeaders = map[string]bool{ + "Authorization": true, + "User-Agent": true, + "Content-Type": true, +} + +// getSignedChunkLength - calculates the length of chunk metadata +func getSignedChunkLength(chunkDataSize int64) int64 { + return int64(len(fmt.Sprintf("%x", chunkDataSize))) + + chunkSigConstLen + + signatureStrLen + + crlfLen + + chunkDataSize + + crlfLen +} + +// getStreamLength - calculates the length of the overall stream (data + metadata) +func getStreamLength(dataLen, chunkSize int64) int64 { + if dataLen <= 0 { + return 0 + } + + chunksCount := int64(dataLen / chunkSize) + remainingBytes := int64(dataLen % chunkSize) + streamLen := int64(0) + streamLen += chunksCount * getSignedChunkLength(chunkSize) + if remainingBytes > 0 { + streamLen += getSignedChunkLength(remainingBytes) + } + streamLen += getSignedChunkLength(0) + return streamLen +} + +// buildChunkStringToSign - returns the string to sign given chunk data +// and previous signature. +func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string { + stringToSignParts := []string{ + streamingPayloadHdr, + t.Format(iso8601DateFormat), + getScope(region, t, ServiceTypeS3), + previousSig, + emptySHA256, + hex.EncodeToString(sum256(chunkData)), + } + + return strings.Join(stringToSignParts, "\n") +} + +// prepareStreamingRequest - prepares a request with appropriate +// headers before computing the seed signature. +func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { + // Set x-amz-content-sha256 header. + req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + if sessionToken != "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } + + req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) + // Set content length with streaming signature for each chunk included. + req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize)) + req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) +} + +// buildChunkHeader - returns the chunk header. +// e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n +func buildChunkHeader(chunkLen int64, signature string) []byte { + return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n") +} + +// buildChunkSignature - returns chunk signature for a given chunk and previous signature. +func buildChunkSignature(chunkData []byte, reqTime time.Time, region, + previousSignature, secretAccessKey string) string { + + chunkStringToSign := buildChunkStringToSign(reqTime, region, + previousSignature, chunkData) + signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) + return getSignature(signingKey, chunkStringToSign) +} + +// getSeedSignature - returns the seed signature for a given request. +func (s *StreamingReader) setSeedSignature(req *http.Request) { + // Get canonical request + canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders, getHashedPayload(*req)) + + // Get string to sign from canonical request. + stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest, ServiceTypeS3) + + signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime, ServiceTypeS3) + + // Calculate signature. + s.seedSignature = getSignature(signingKey, stringToSign) +} + +// StreamingReader implements chunked upload signature as a reader on +// top of req.Body's ReaderCloser chunk header;data;... 
repeat +type StreamingReader struct { + accessKeyID string + secretAccessKey string + sessionToken string + region string + prevSignature string + seedSignature string + contentLen int64 // Content-Length from req header + baseReadCloser io.ReadCloser // underlying io.Reader + bytesRead int64 // bytes read from underlying io.Reader + buf bytes.Buffer // holds signed chunk + chunkBuf []byte // holds raw data read from req Body + chunkBufLen int // no. of bytes read so far into chunkBuf + done bool // done reading the underlying reader to EOF + reqTime time.Time + chunkNum int + totalChunks int + lastChunkSize int +} + +// signChunk - signs a chunk read from s.baseReader of chunkLen size. +func (s *StreamingReader) signChunk(chunkLen int) { + // Compute chunk signature for next header + signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime, + s.region, s.prevSignature, s.secretAccessKey) + + // For next chunk signature computation + s.prevSignature = signature + + // Write chunk header into streaming buffer + chunkHdr := buildChunkHeader(int64(chunkLen), signature) + s.buf.Write(chunkHdr) + + // Write chunk data into streaming buffer + s.buf.Write(s.chunkBuf[:chunkLen]) + + // Write the chunk trailer. + s.buf.Write([]byte("\r\n")) + + // Reset chunkBufLen for next chunk read. + s.chunkBufLen = 0 + s.chunkNum++ +} + +// setStreamingAuthHeader - builds and sets authorization header value +// for streaming signature. +func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { + credential := GetCredential(s.accessKeyID, s.region, s.reqTime, ServiceTypeS3) + authParts := []string{ + signV4Algorithm + " Credential=" + credential, + "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders), + "Signature=" + s.seedSignature, + } + + // Set authorization header. + auth := strings.Join(authParts, ",") + req.Header.Set("Authorization", auth) +} + +// StreamingSignV4 - provides chunked upload signatureV4 support by +// implementing io.Reader. +func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken, + region string, dataLen int64, reqTime time.Time) *http.Request { + + // Set headers needed for streaming signature. + prepareStreamingRequest(req, sessionToken, dataLen, reqTime) + + if req.Body == nil { + req.Body = ioutil.NopCloser(bytes.NewReader([]byte(""))) + } + + stReader := &StreamingReader{ + baseReadCloser: req.Body, + accessKeyID: accessKeyID, + secretAccessKey: secretAccessKey, + sessionToken: sessionToken, + region: region, + reqTime: reqTime, + chunkBuf: make([]byte, payloadChunkSize), + contentLen: dataLen, + chunkNum: 1, + totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, + lastChunkSize: int(dataLen % payloadChunkSize), + } + + // Add the request headers required for chunk upload signing. + + // Compute the seed signature. + stReader.setSeedSignature(req) + + // Set the authorization header with the seed signature. + stReader.setStreamingAuthHeader(req) + + // Set seed signature as prevSignature for subsequent + // streaming signing process. + stReader.prevSignature = stReader.seedSignature + req.Body = stReader + + return req +} + +// Read - this method performs chunk upload signature providing a +// io.Reader interface. +func (s *StreamingReader) Read(buf []byte) (int, error) { + switch { + // After the last chunk is read from underlying reader, we + // never re-fill s.buf. + case s.done: + + // s.buf will be (re-)filled with next chunk when has lesser + // bytes than asked for. 
+ case s.buf.Len() < len(buf): + s.chunkBufLen = 0 + for { + n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) + // Usually we validate `err` first, but in this case + // we are validating n > 0 for the following reasons. + // + // 1. n > 0, err is one of io.EOF, nil (near end of stream) + // A Reader returning a non-zero number of bytes at the end + // of the input stream may return either err == EOF or err == nil + // + // 2. n == 0, err is io.EOF (actual end of stream) + // + // Callers should always process the n > 0 bytes returned + // before considering the error err. + if n1 > 0 { + s.chunkBufLen += n1 + s.bytesRead += int64(n1) + + if s.chunkBufLen == payloadChunkSize || + (s.chunkNum == s.totalChunks-1 && + s.chunkBufLen == s.lastChunkSize) { + // Sign the chunk and write it to s.buf. + s.signChunk(s.chunkBufLen) + break + } + } + if err != nil { + if err == io.EOF { + // No more data left in baseReader - last chunk. + // Done reading the last chunk from baseReader. + s.done = true + + // bytes read from baseReader different than + // content length provided. + if s.bytesRead != s.contentLen { + return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead) + } + + // Sign the chunk and write it to s.buf. + s.signChunk(0) + break + } + return 0, err + } + + } + } + return s.buf.Read(buf) +} + +// Close - this method makes underlying io.ReadCloser's Close method available. +func (s *StreamingReader) Close() error { + return s.baseReadCloser.Close() +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go new file mode 100644 index 00000000..71821a26 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go @@ -0,0 +1,317 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package signer + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// Signature and API related constants. +const ( + signV2Algorithm = "AWS" +) + +// Encode input URL path to URL encoded path. +func encodeURL2Path(req *http.Request, virtualHost bool) (path string) { + if virtualHost { + reqHost := getHostAddr(req) + dotPos := strings.Index(reqHost, ".") + if dotPos > -1 { + bucketName := reqHost[:dotPos] + path = "/" + bucketName + path += req.URL.Path + path = s3utils.EncodePath(path) + return + } + } + path = s3utils.EncodePath(req.URL.Path) + return +} + +// PreSignV2 - presign the request in following style. +// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}. 
+func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request {
+	// Presign is not needed for anonymous credentials.
+	if accessKeyID == "" || secretAccessKey == "" {
+		return &req
+	}
+
+	d := time.Now().UTC()
+	// Find epoch expires when the request will expire.
+	epochExpires := d.Unix() + expires
+
+	// Add expires header if not present.
+	if expiresStr := req.Header.Get("Expires"); expiresStr == "" {
+		req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10))
+	}
+
+	// Get presigned string to sign.
+	stringToSign := preStringToSignV2(req, virtualHost)
+	hm := hmac.New(sha1.New, []byte(secretAccessKey))
+	hm.Write([]byte(stringToSign))
+
+	// Calculate signature.
+	signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
+
+	query := req.URL.Query()
+	// Handle specially for Google Cloud Storage.
+	if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") {
+		query.Set("GoogleAccessId", accessKeyID)
+	} else {
+		query.Set("AWSAccessKeyId", accessKeyID)
+	}
+
+	// Fill in Expires for presigned query.
+	query.Set("Expires", strconv.FormatInt(epochExpires, 10))
+
+	// Encode query and save.
+	req.URL.RawQuery = s3utils.QueryEncode(query)
+
+	// Save signature finally.
+	req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature)
+
+	// Return.
+	return &req
+}
+
+// PostPresignSignatureV2 - presigned signature for PostPolicy
+// request.
+func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
+	hm := hmac.New(sha1.New, []byte(secretAccessKey))
+	hm.Write([]byte(policyBase64))
+	signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
+	return signature
+}
+
+// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
+// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
+//
+// StringToSign = HTTP-Verb + "\n" +
+//	Content-Md5 + "\n" +
+//	Content-Type + "\n" +
+//	Date + "\n" +
+//	CanonicalizedProtocolHeaders +
+//	CanonicalizedResource;
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+//	<HTTP-Request-URI, from the protocol name up to the query string> +
+//	[ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+//
+// CanonicalizedProtocolHeaders = <described below>
+
+// SignV2 sign the request before Do() (AWS Signature Version 2).
+func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request {
+	// Signature calculation is not needed for anonymous credentials.
+	if accessKeyID == "" || secretAccessKey == "" {
+		return &req
+	}
+
+	// Initial time.
+	d := time.Now().UTC()
+
+	// Add date if not present.
+	if date := req.Header.Get("Date"); date == "" {
+		req.Header.Set("Date", d.Format(http.TimeFormat))
+	}
+
+	// Calculate HMAC for secretAccessKey.
+	stringToSign := stringToSignV2(req, virtualHost)
+	hm := hmac.New(sha1.New, []byte(secretAccessKey))
+	hm.Write([]byte(stringToSign))
+
+	// Prepare auth header.
+	authHeader := new(bytes.Buffer)
+	authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID))
+	encoder := base64.NewEncoder(base64.StdEncoding, authHeader)
+	encoder.Write(hm.Sum(nil))
+	encoder.Close()
+
+	// Set Authorization header.
+ req.Header.Set("Authorization", authHeader.String()) + + return &req +} + +// From the Amazon docs: +// +// StringToSign = HTTP-Verb + "\n" + +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Expires + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +func preStringToSignV2(req http.Request, virtualHost bool) string { + buf := new(bytes.Buffer) + // Write standard headers. + writePreSignV2Headers(buf, req) + // Write canonicalized protocol headers if any. + writeCanonicalizedHeaders(buf, req) + // Write canonicalized Query resources if any. + writeCanonicalizedResource(buf, req, virtualHost) + return buf.String() +} + +// writePreSignV2Headers - write preSign v2 required headers. +func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) { + buf.WriteString(req.Method + "\n") + buf.WriteString(req.Header.Get("Content-Md5") + "\n") + buf.WriteString(req.Header.Get("Content-Type") + "\n") + buf.WriteString(req.Header.Get("Expires") + "\n") +} + +// From the Amazon docs: +// +// StringToSign = HTTP-Verb + "\n" + +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +func stringToSignV2(req http.Request, virtualHost bool) string { + buf := new(bytes.Buffer) + // Write standard headers. + writeSignV2Headers(buf, req) + // Write canonicalized protocol headers if any. + writeCanonicalizedHeaders(buf, req) + // Write canonicalized Query resources if any. + writeCanonicalizedResource(buf, req, virtualHost) + return buf.String() +} + +// writeSignV2Headers - write signV2 required headers. +func writeSignV2Headers(buf *bytes.Buffer, req http.Request) { + buf.WriteString(req.Method + "\n") + buf.WriteString(req.Header.Get("Content-Md5") + "\n") + buf.WriteString(req.Header.Get("Content-Type") + "\n") + buf.WriteString(req.Header.Get("Date") + "\n") +} + +// writeCanonicalizedHeaders - write canonicalized headers. +func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) { + var protoHeaders []string + vals := make(map[string][]string) + for k, vv := range req.Header { + // All the AMZ headers should be lowercase + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-amz") { + protoHeaders = append(protoHeaders, lk) + vals[lk] = vv + } + } + sort.Strings(protoHeaders) + for _, k := range protoHeaders { + buf.WriteString(k) + buf.WriteByte(':') + for idx, v := range vals[k] { + if idx > 0 { + buf.WriteByte(',') + } + if strings.Contains(v, "\n") { + // TODO: "Unfold" long headers that + // span multiple lines (as allowed by + // RFC 2616, section 4.2) by replacing + // the folding white-space (including + // new-line) by a single space. + buf.WriteString(v) + } else { + buf.WriteString(v) + } + } + buf.WriteByte('\n') + } +} + +// AWS S3 Signature V2 calculation rule is give here: +// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign + +// Whitelist resource list that will be used in query string for signature-V2 calculation. 
+// The list should be alphabetically sorted.
+var resourceList = []string{
+	"acl",
+	"delete",
+	"lifecycle",
+	"location",
+	"logging",
+	"notification",
+	"partNumber",
+	"policy",
+	"replication",
+	"requestPayment",
+	"response-cache-control",
+	"response-content-disposition",
+	"response-content-encoding",
+	"response-content-language",
+	"response-content-type",
+	"response-expires",
+	"torrent",
+	"uploadId",
+	"uploads",
+	"versionId",
+	"versioning",
+	"versions",
+	"website",
+}
+
+// From the Amazon docs:
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+//	<HTTP-Request-URI, from the protocol name up to the query string> +
+//	[ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) {
+	// Save request URL.
+	requestURL := req.URL
+	// Get encoded URL path.
+	buf.WriteString(encodeURL2Path(&req, virtualHost))
+	if requestURL.RawQuery != "" {
+		var n int
+		vals, _ := url.ParseQuery(requestURL.RawQuery)
+		// Verify if any sub resource queries are present, if yes
+		// canonicalize them.
+		for _, resource := range resourceList {
+			if vv, ok := vals[resource]; ok && len(vv) > 0 {
+				n++
+				// First element
+				switch n {
+				case 1:
+					buf.WriteByte('?')
+				// The rest
+				default:
+					buf.WriteByte('&')
+				}
+				buf.WriteString(resource)
+				// Request parameters
+				if len(vv[0]) > 0 {
+					buf.WriteByte('=')
+					buf.WriteString(vv[0])
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
new file mode 100644
index 00000000..67572b20
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go
@@ -0,0 +1,318 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signer
+
+import (
+	"bytes"
+	"encoding/hex"
+	"net/http"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// Signature and API related constants.
+const (
+	signV4Algorithm   = "AWS4-HMAC-SHA256"
+	iso8601DateFormat = "20060102T150405Z"
+	yyyymmdd          = "20060102"
+)
+
+// Different service types
+const (
+	ServiceTypeS3  = "s3"
+	ServiceTypeSTS = "sts"
+)
+
+///
+/// Excerpts from @lsegal -
+/// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
+///
+/// User-Agent:
+///
+///     This is ignored from signing because signing this causes
+///     problems with generating pre-signed URLs (that are executed
+///     by other agents) or when customers pass requests through
+///     proxies, which may modify the user-agent.
+///
+/// Authorization:
+///
+///     Is skipped for obvious reasons
+///
+var v4IgnoredHeaders = map[string]bool{
+	"Authorization": true,
+	"User-Agent":    true,
+}
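For reference, a self-contained sketch of the AWS4 key-derivation chain that getSigningKey (below) implements, written against the standard library; the secret key and region are made-up values:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

// hmacSHA256 chains one step of the derivation: HMAC(key, data).
func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret := "EXAMPLE-SECRET-KEY" // hypothetical secret access key
	t := time.Date(2021, 10, 16, 0, 0, 0, 0, time.UTC)

	// kDate -> kRegion -> kService -> kSigning, exactly the chain
	// used for AWS4-HMAC-SHA256 request signing.
	kDate := hmacSHA256([]byte("AWS4"+secret), []byte(t.Format("20060102")))
	kRegion := hmacSHA256(kDate, []byte("us-east-1"))
	kService := hmacSHA256(kRegion, []byte("s3"))
	kSigning := hmacSHA256(kService, []byte("aws4_request"))

	fmt.Println(hex.EncodeToString(kSigning))
}
+
+// getSigningKey hmac seed to calculate final signature.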
+func getSigningKey(secret, loc string, t time.Time, serviceType string) []byte { + date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) + location := sumHMAC(date, []byte(loc)) + service := sumHMAC(location, []byte(serviceType)) + signingKey := sumHMAC(service, []byte("aws4_request")) + return signingKey +} + +// getSignature final signature in hexadecimal form. +func getSignature(signingKey []byte, stringToSign string) string { + return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) +} + +// getScope generate a string of a specific date, an AWS region, and a +// service. +func getScope(location string, t time.Time, serviceType string) string { + scope := strings.Join([]string{ + t.Format(yyyymmdd), + location, + serviceType, + "aws4_request", + }, "/") + return scope +} + +// GetCredential generate a credential string. +func GetCredential(accessKeyID, location string, t time.Time, serviceType string) string { + scope := getScope(location, t, serviceType) + return accessKeyID + "/" + scope +} + +// getHashedPayload get the hexadecimal value of the SHA256 hash of +// the request payload. +func getHashedPayload(req http.Request) string { + hashedPayload := req.Header.Get("X-Amz-Content-Sha256") + if hashedPayload == "" { + // Presign does not have a payload, use S3 recommended value. + hashedPayload = unsignedPayload + } + return hashedPayload +} + +// getCanonicalHeaders generate a list of request headers for +// signature. +func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string { + var headers []string + vals := make(map[string][]string) + for k, vv := range req.Header { + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { + continue // ignored header + } + headers = append(headers, strings.ToLower(k)) + vals[strings.ToLower(k)] = vv + } + headers = append(headers, "host") + sort.Strings(headers) + + var buf bytes.Buffer + // Save all the headers in canonical form
<header>:<value> newline
+	// separated for each header.
+	for _, k := range headers {
+		buf.WriteString(k)
+		buf.WriteByte(':')
+		switch {
+		case k == "host":
+			buf.WriteString(getHostAddr(&req))
+			fallthrough
+		default:
+			for idx, v := range vals[k] {
+				if idx > 0 {
+					buf.WriteByte(',')
+				}
+				buf.WriteString(signV4TrimAll(v))
+			}
+			buf.WriteByte('\n')
+		}
+	}
+	return buf.String()
+}
+
+// getSignedHeaders generate all signed request headers.
+// i.e lexically sorted, semicolon-separated list of lowercase
+// request header names.
+func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
+	var headers []string
+	for k := range req.Header {
+		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+			continue // Ignored header found continue.
+		}
+		headers = append(headers, strings.ToLower(k))
+	}
+	headers = append(headers, "host")
+	sort.Strings(headers)
+	return strings.Join(headers, ";")
+}
+
+// getCanonicalRequest generate a canonical request of style.
+//
+// canonicalRequest =
+//	<HTTPMethod>\n
+//	<CanonicalURI>\n
+//	<CanonicalQueryString>\n
+//	<CanonicalHeaders>\n
+//	<SignedHeaders>\n
+//	<HashedPayload>
+func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string {
+	req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
+	canonicalRequest := strings.Join([]string{
+		req.Method,
+		s3utils.EncodePath(req.URL.Path),
+		req.URL.RawQuery,
+		getCanonicalHeaders(req, ignoredHeaders),
+		getSignedHeaders(req, ignoredHeaders),
+		hashedPayload,
+	}, "\n")
+	return canonicalRequest
+}
+
+// getStringToSignV4 a string based on selected query values.
+func getStringToSignV4(t time.Time, location, canonicalRequest, serviceType string) string {
+	stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
+	stringToSign = stringToSign + getScope(location, t, serviceType) + "\n"
+	stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
+	return stringToSign
+}
+
+// PreSignV4 presign the request, in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
+func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request {
+	// Presign is not needed for anonymous credentials.
+	if accessKeyID == "" || secretAccessKey == "" {
+		return &req
+	}
+
+	// Initial time.
+	t := time.Now().UTC()
+
+	// Get credential string.
+	credential := GetCredential(accessKeyID, location, t, ServiceTypeS3)
+
+	// Get all signed headers.
+	signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
+
+	// Set URL query.
+	query := req.URL.Query()
+	query.Set("X-Amz-Algorithm", signV4Algorithm)
+	query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+	query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
+	query.Set("X-Amz-SignedHeaders", signedHeaders)
+	query.Set("X-Amz-Credential", credential)
+	// Set session token if available.
+	if sessionToken != "" {
+		query.Set("X-Amz-Security-Token", sessionToken)
+	}
+	req.URL.RawQuery = query.Encode()
+
+	// Get canonical request.
+	canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, getHashedPayload(req))
+
+	// Get string to sign from canonical request.
+	stringToSign := getStringToSignV4(t, location, canonicalRequest, ServiceTypeS3)
+
+	// Get hmac signing key.
+	signingKey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3)
+
+	// Calculate signature.
+	signature := getSignature(signingKey, stringToSign)
+
+	// Add signature header to RawQuery.
+ req.URL.RawQuery += "&X-Amz-Signature=" + signature + + return &req +} + +// PostPresignSignatureV4 - presigned signature for PostPolicy +// requests. +func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string { + // Get signining key. + signingkey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3) + // Calculate signature. + signature := getSignature(signingkey, policyBase64) + return signature +} + +// SignV4STS - signature v4 for STS request. +func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request { + return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS) +} + +// Internal function called for different service types. +func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string) *http.Request { + // Signature calculation is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return &req + } + + // Initial time. + t := time.Now().UTC() + + // Set x-amz-date. + req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat)) + + // Set session token if available. + if sessionToken != "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } + + hashedPayload := getHashedPayload(req) + if serviceType == ServiceTypeSTS { + // Content sha256 header is not sent with the request + // but it is expected to have sha256 of payload for signature + // in STS service type request. + req.Header.Del("X-Amz-Content-Sha256") + } + + // Get canonical request. + canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, hashedPayload) + + // Get string to sign from canonical request. + stringToSign := getStringToSignV4(t, location, canonicalRequest, serviceType) + + // Get hmac signing key. + signingKey := getSigningKey(secretAccessKey, location, t, serviceType) + + // Get credential string. + credential := GetCredential(accessKeyID, location, t, serviceType) + + // Get all signed headers. + signedHeaders := getSignedHeaders(req, v4IgnoredHeaders) + + // Calculate signature. + signature := getSignature(signingKey, stringToSign) + + // If regular request, construct the final authorization header. + parts := []string{ + signV4Algorithm + " Credential=" + credential, + "SignedHeaders=" + signedHeaders, + "Signature=" + signature, + } + + // Set authorization header. + auth := strings.Join(parts, ", ") + req.Header.Set("Authorization", auth) + + return &req +} + +// SignV4 sign the request before Do(), in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. +func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request { + return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3) +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go new file mode 100644 index 00000000..2192a369 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go @@ -0,0 +1,59 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signer
+
+import (
+	"crypto/hmac"
+	"net/http"
+	"strings"
+
+	"github.com/minio/sha256-simd"
+)
+
+// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
+// the request payload is not signed.
+const unsignedPayload = "UNSIGNED-PAYLOAD"
+
+// sum256 calculate sha256 sum for an input byte array.
+func sum256(data []byte) []byte {
+	hash := sha256.New()
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+// sumHMAC calculate hmac between two input byte array.
+func sumHMAC(key []byte, data []byte) []byte {
+	hash := hmac.New(sha256.New, key)
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+// getHostAddr returns host header if available, otherwise returns host from URL
+func getHostAddr(req *http.Request) string {
+	if req.Host != "" {
+		return req.Host
+	}
+	return req.URL.Host
+}
+
+// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall()
+// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+func signV4TrimAll(input string) string {
+	// Compress adjacent spaces (a space is determined by
+	// unicode.IsSpace() internally here) to one space and return
+	return strings.Join(strings.Fields(input), " ")
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
new file mode 100644
index 00000000..b5fb9565
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
@@ -0,0 +1,66 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sse
+
+import "encoding/xml"
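A short sketch of how the configurations built below are typically applied, via the client's SetBucketEncryption API from elsewhere in this library; the endpoint, credentials, and bucket name are placeholders:

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/minio/minio-go/v7/pkg/sse"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Default-encrypt every new object in the bucket with SSE-S3 (AES256).
	cfg := sse.NewConfigurationSSES3()
	if err := client.SetBucketEncryption(context.Background(), "my-bucket", cfg); err != nil {
		log.Fatal(err)
	}
}
+
+// ApplySSEByDefault defines default encryption configuration, KMS or SSE. To activate
+// KMS, SSEAlgorithm needs to be set to "aws:kms".
+// MinIO currently does not support KMS.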
+type ApplySSEByDefault struct { + KmsMasterKeyID string `xml:"KMSMasterKeyID,omitempty"` + SSEAlgorithm string `xml:"SSEAlgorithm"` +} + +// Rule layer encapsulates default encryption configuration +type Rule struct { + Apply ApplySSEByDefault `xml:"ApplyServerSideEncryptionByDefault"` +} + +// Configuration is the default encryption configuration structure +type Configuration struct { + XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"` + Rules []Rule `xml:"Rule"` +} + +// NewConfigurationSSES3 initializes a new SSE-S3 configuration +func NewConfigurationSSES3() *Configuration { + return &Configuration{ + Rules: []Rule{ + { + Apply: ApplySSEByDefault{ + SSEAlgorithm: "AES256", + }, + }, + }, + } +} + +// NewConfigurationSSEKMS initializes a new SSE-KMS configuration +func NewConfigurationSSEKMS(kmsMasterKey string) *Configuration { + return &Configuration{ + Rules: []Rule{ + { + Apply: ApplySSEByDefault{ + KmsMasterKeyID: kmsMasterKey, + SSEAlgorithm: "aws:kms", + }, + }, + }, + } +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go new file mode 100644 index 00000000..d7c65af5 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go @@ -0,0 +1,341 @@ +/* + * MinIO Cloud Storage, (C) 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tags + +import ( + "encoding/xml" + "io" + "net/url" + "strings" + "unicode/utf8" +) + +// Error contains tag specific error. +type Error interface { + error + Code() string +} + +type errTag struct { + code string + message string +} + +// Code contains error code. +func (err errTag) Code() string { + return err.code +} + +// Error contains error message. +func (err errTag) Error() string { + return err.message +} + +var ( + errTooManyObjectTags = &errTag{"BadRequest", "Tags cannot be more than 10"} + errTooManyTags = &errTag{"BadRequest", "Tags cannot be more than 50"} + errInvalidTagKey = &errTag{"InvalidTag", "The TagKey you have provided is invalid"} + errInvalidTagValue = &errTag{"InvalidTag", "The TagValue you have provided is invalid"} + errDuplicateTagKey = &errTag{"InvalidTag", "Cannot provide multiple Tags with the same key"} +) + +// Tag comes with limitation as per +// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html amd +// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions +const ( + maxKeyLength = 128 + maxValueLength = 256 + maxObjectTagCount = 10 + maxTagCount = 50 +) + +func checkKey(key string) error { + if len(key) == 0 || utf8.RuneCountInString(key) > maxKeyLength || strings.Contains(key, "&") { + return errInvalidTagKey + } + + return nil +} + +func checkValue(value string) error { + if utf8.RuneCountInString(value) > maxValueLength || strings.Contains(value, "&") { + return errInvalidTagValue + } + + return nil +} + +// Tag denotes key and value. 
+type Tag struct { + Key string `xml:"Key"` + Value string `xml:"Value"` +} + +func (tag Tag) String() string { + return tag.Key + "=" + tag.Value +} + +// IsEmpty returns whether this tag is empty or not. +func (tag Tag) IsEmpty() bool { + return tag.Key == "" +} + +// Validate checks this tag. +func (tag Tag) Validate() error { + if err := checkKey(tag.Key); err != nil { + return err + } + + return checkValue(tag.Value) +} + +// MarshalXML encodes to XML data. +func (tag Tag) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := tag.Validate(); err != nil { + return err + } + + type subTag Tag // to avoid recursively calling MarshalXML() + return e.EncodeElement(subTag(tag), start) +} + +// UnmarshalXML decodes XML data to tag. +func (tag *Tag) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type subTag Tag // to avoid recursively calling UnmarshalXML() + var st subTag + if err := d.DecodeElement(&st, &start); err != nil { + return err + } + + if err := Tag(st).Validate(); err != nil { + return err + } + + *tag = Tag(st) + return nil +} + +// tagSet represents list of unique tags. +type tagSet struct { + tagMap map[string]string + isObject bool +} + +func (tags tagSet) String() string { + vals := make(url.Values) + for key, value := range tags.tagMap { + vals.Set(key, value) + } + return vals.Encode() +} + +func (tags *tagSet) remove(key string) { + delete(tags.tagMap, key) +} + +func (tags *tagSet) set(key, value string, failOnExist bool) error { + if failOnExist { + if _, found := tags.tagMap[key]; found { + return errDuplicateTagKey + } + } + + if err := checkKey(key); err != nil { + return err + } + + if err := checkValue(value); err != nil { + return err + } + + if tags.isObject { + if len(tags.tagMap) == maxObjectTagCount { + return errTooManyObjectTags + } + } else if len(tags.tagMap) == maxTagCount { + return errTooManyTags + } + + tags.tagMap[key] = value + return nil +} + +func (tags tagSet) toMap() map[string]string { + m := make(map[string]string) + for key, value := range tags.tagMap { + m[key] = value + } + return m +} + +// MarshalXML encodes to XML data. +func (tags tagSet) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + tagList := struct { + Tags []Tag `xml:"Tag"` + }{} + + for key, value := range tags.tagMap { + tagList.Tags = append(tagList.Tags, Tag{key, value}) + } + + return e.EncodeElement(tagList, start) +} + +// UnmarshalXML decodes XML data to tag list. +func (tags *tagSet) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + tagList := struct { + Tags []Tag `xml:"Tag"` + }{} + + if err := d.DecodeElement(&tagList, &start); err != nil { + return err + } + + if tags.isObject { + if len(tagList.Tags) > maxObjectTagCount { + return errTooManyObjectTags + } + } else if len(tagList.Tags) > maxTagCount { + return errTooManyTags + } + + m := map[string]string{} + for _, tag := range tagList.Tags { + if _, found := m[tag.Key]; found { + return errDuplicateTagKey + } + + m[tag.Key] = tag.Value + } + + tags.tagMap = m + return nil +} + +type tagging struct { + XMLName xml.Name `xml:"Tagging"` + TagSet *tagSet `xml:"TagSet"` +} + +// Tags is list of tags of XML request/response as per +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html#API_GetBucketTagging_RequestBody +type Tags tagging + +func (tags Tags) String() string { + return tags.TagSet.String() +} + +// Remove removes a tag by its key. +func (tags *Tags) Remove(key string) { + tags.TagSet.remove(key) +} + +// Set sets new tag. 
+func (tags *Tags) Set(key, value string) error {
+	return tags.TagSet.set(key, value, false)
+}
+
+// ToMap returns a copy of the tags.
+func (tags Tags) ToMap() map[string]string {
+	return tags.TagSet.toMap()
+}
+
+// MapToObjectTags converts an input map of key and value into
+// *Tags data structure with validation.
+func MapToObjectTags(tagMap map[string]string) (*Tags, error) {
+	return NewTags(tagMap, true)
+}
+
+// MapToBucketTags converts an input map of key and value into
+// *Tags data structure with validation.
+func MapToBucketTags(tagMap map[string]string) (*Tags, error) {
+	return NewTags(tagMap, false)
+}
+
+// NewTags creates Tags from tagMap; if isObject is set, it validates for object tags.
+func NewTags(tagMap map[string]string, isObject bool) (*Tags, error) {
+	tagging := &Tags{
+		TagSet: &tagSet{
+			tagMap:   make(map[string]string),
+			isObject: isObject,
+		},
+	}
+
+	for key, value := range tagMap {
+		if err := tagging.TagSet.set(key, value, true); err != nil {
+			return nil, err
+		}
+	}
+
+	return tagging, nil
+}
+
+func unmarshalXML(reader io.Reader, isObject bool) (*Tags, error) {
+	tagging := &Tags{
+		TagSet: &tagSet{
+			tagMap:   make(map[string]string),
+			isObject: isObject,
+		},
+	}
+
+	if err := xml.NewDecoder(reader).Decode(tagging); err != nil {
+		return nil, err
+	}
+
+	return tagging, nil
+}
+
+// ParseBucketXML decodes XML data of tags in reader specified in
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html#API_PutBucketTagging_RequestSyntax.
+func ParseBucketXML(reader io.Reader) (*Tags, error) {
+	return unmarshalXML(reader, false)
+}
+
+// ParseObjectXML decodes XML data of tags in reader specified in
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html#API_PutObjectTagging_RequestSyntax
+func ParseObjectXML(reader io.Reader) (*Tags, error) {
+	return unmarshalXML(reader, true)
+}
+
+// Parse decodes an HTTP query formatted string into tags; the tag-count
+// limit applied depends on isObject.
+// A query formatted string is like "key1=value1&key2=value2".
+func Parse(s string, isObject bool) (*Tags, error) {
+	values, err := url.ParseQuery(s)
+	if err != nil {
+		return nil, err
+	}
+
+	tagging := &Tags{
+		TagSet: &tagSet{
+			tagMap:   make(map[string]string),
+			isObject: isObject,
+		},
+	}
+
+	for key := range values {
+		if err := tagging.TagSet.set(key, values.Get(key), true); err != nil {
+			return nil, err
+		}
+	}
+
+	return tagging, nil
+}
+
+// ParseObjectTags decodes an HTTP query formatted string into tags. A query formatted string is like "key1=value1&key2=value2".
+func ParseObjectTags(s string) (*Tags, error) {
+	return Parse(s, true)
+}
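As a usage illustration for the constructors above, a minimal standalone sketch (not part of the vendored patch); the tag keys and values are made up:

package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v7/pkg/tags"
)

func main() {
	// Object tags are validated on insertion: at most 10 tags, keys up
	// to 128 runes, values up to 256 runes, and no '&' in either.
	t, err := tags.NewTags(map[string]string{"project": "alpha"}, true)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(t.String()) // query-encoded, e.g. "project=alpha"

	// Round-trip from an HTTP query formatted string.
	t2, err := tags.Parse("key1=value1&key2=value2", true)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(t2.ToMap())
}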
diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go
new file mode 100644
index 00000000..31a7308c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/post-policy.go
@@ -0,0 +1,327 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// expirationDateFormat date format for expiration key in json policy.
+const expirationDateFormat = "2006-01-02T15:04:05.999Z"
+
+// policyCondition explanation:
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
+//
+// Example:
+//
+//	policyCondition {
+//	    matchType: "eq",
+//	    condition: "$Content-Type",
+//	    value:     "image/png",
+//	}
+//
+type policyCondition struct {
+	matchType string
+	condition string
+	value     string
+}
+
+// PostPolicy - Provides strict static type conversion and validation
+// for Amazon S3's POST policy JSON string.
+type PostPolicy struct {
+	// Expiration date and time of the POST policy.
+	expiration time.Time
+	// Collection of different policy conditions.
+	conditions []policyCondition
+	// ContentLengthRange minimum and maximum allowable size for the
+	// uploaded content.
+	contentLengthRange struct {
+		min int64
+		max int64
+	}
+
+	// Post form data.
+	formData map[string]string
+}
+
+// NewPostPolicy - Instantiate new post policy.
+func NewPostPolicy() *PostPolicy {
+	p := &PostPolicy{}
+	p.conditions = make([]policyCondition, 0)
+	p.formData = make(map[string]string)
+	return p
+}
+
+// SetExpires - Sets expiration time for the new policy.
+func (p *PostPolicy) SetExpires(t time.Time) error {
+	if t.IsZero() {
+		return errInvalidArgument("No expiry time set.")
+	}
+	p.expiration = t
+	return nil
+}
+
+// SetKey - Sets an object name for the policy based upload.
+func (p *PostPolicy) SetKey(key string) error {
+	if strings.TrimSpace(key) == "" || key == "" {
+		return errInvalidArgument("Object name is empty.")
+	}
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: "$key",
+		value:     key,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["key"] = key
+	return nil
+}
+
+// SetKeyStartsWith - Sets an object name that a policy based upload
+// can start with.
+func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
+	if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" {
+		return errInvalidArgument("Object prefix is empty.")
+	}
+	policyCond := policyCondition{
+		matchType: "starts-with",
+		condition: "$key",
+		value:     keyStartsWith,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["key"] = keyStartsWith
+	return nil
+}
+
+// SetBucket - Sets the bucket to which objects will be uploaded.
+func (p *PostPolicy) SetBucket(bucketName string) error { + if strings.TrimSpace(bucketName) == "" || bucketName == "" { + return errInvalidArgument("Bucket name is empty.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$bucket", + value: bucketName, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["bucket"] = bucketName + return nil +} + +// SetCondition - Sets condition for credentials, date and algorithm +func (p *PostPolicy) SetCondition(matchType, condition, value string) error { + if strings.TrimSpace(value) == "" || value == "" { + return errInvalidArgument("No value specified for condition") + } + + policyCond := policyCondition{ + matchType: matchType, + condition: "$" + condition, + value: value, + } + if condition == "X-Amz-Credential" || condition == "X-Amz-Date" || condition == "X-Amz-Algorithm" { + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[condition] = value + return nil + } + return errInvalidArgument("Invalid condition in policy") +} + +// SetContentType - Sets content-type of the object for this policy +// based upload. +func (p *PostPolicy) SetContentType(contentType string) error { + if strings.TrimSpace(contentType) == "" || contentType == "" { + return errInvalidArgument("No content type specified.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$Content-Type", + value: contentType, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["Content-Type"] = contentType + return nil +} + +// SetContentTypeStartsWith - Sets what content-type of the object for this policy +// based upload can start with. +func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) error { + if strings.TrimSpace(contentTypeStartsWith) == "" || contentTypeStartsWith == "" { + return errInvalidArgument("No content type specified.") + } + policyCond := policyCondition{ + matchType: "starts-with", + condition: "$Content-Type", + value: contentTypeStartsWith, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["Content-Type"] = contentTypeStartsWith + return nil +} + +// SetContentLengthRange - Set new min and max content length +// condition for all incoming uploads. +func (p *PostPolicy) SetContentLengthRange(min, max int64) error { + if min > max { + return errInvalidArgument("Minimum limit is larger than maximum limit.") + } + if min < 0 { + return errInvalidArgument("Minimum limit cannot be negative.") + } + if max < 0 { + return errInvalidArgument("Maximum limit cannot be negative.") + } + p.contentLengthRange.min = min + p.contentLengthRange.max = max + return nil +} + +// SetSuccessActionRedirect - Sets the redirect success url of the object for this policy +// based upload. +func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error { + if strings.TrimSpace(redirect) == "" || redirect == "" { + return errInvalidArgument("Redirect is empty") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$success_action_redirect", + value: redirect, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["success_action_redirect"] = redirect + return nil +} + +// SetSuccessStatusAction - Sets the status success code of the object for this policy +// based upload. 
+func (p *PostPolicy) SetSuccessStatusAction(status string) error {
+	if strings.TrimSpace(status) == "" || status == "" {
+		return errInvalidArgument("Status is empty")
+	}
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: "$success_action_status",
+		value:     status,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["success_action_status"] = status
+	return nil
+}
+
+// SetUserMetadata - Set user metadata as a key/value pair.
+// Can be retrieved through a HEAD request or an event.
+func (p *PostPolicy) SetUserMetadata(key string, value string) error {
+	if strings.TrimSpace(key) == "" || key == "" {
+		return errInvalidArgument("Key is empty")
+	}
+	if strings.TrimSpace(value) == "" || value == "" {
+		return errInvalidArgument("Value is empty")
+	}
+	headerName := fmt.Sprintf("x-amz-meta-%s", key)
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: fmt.Sprintf("$%s", headerName),
+		value:     value,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData[headerName] = value
+	return nil
+}
+
+// SetUserData - Set user data as a key/value pair.
+// Can be retrieved through a HEAD request or an event.
+func (p *PostPolicy) SetUserData(key string, value string) error {
+	if key == "" {
+		return errInvalidArgument("Key is empty")
+	}
+	if value == "" {
+		return errInvalidArgument("Value is empty")
+	}
+	headerName := fmt.Sprintf("x-amz-%s", key)
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: fmt.Sprintf("$%s", headerName),
+		value:     value,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData[headerName] = value
+	return nil
+}
+
+// addNewPolicy - internal helper to validate adding new policies.
+func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
+	if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
+		return errInvalidArgument("Policy fields are empty.")
+	}
+	p.conditions = append(p.conditions, policyCond)
+	return nil
+}
+
+// String returns the policy as a JSON formatted string.
+func (p PostPolicy) String() string {
+	return string(p.marshalJSON())
+}
+
+// marshalJSON - Provides the policy marshaled as JSON bytes.
+func (p PostPolicy) marshalJSON() []byte {
+	expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
+	var conditionsStr string
+	conditions := []string{}
+	for _, po := range p.conditions {
+		conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value))
+	}
+	if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 {
+		conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]",
+			p.contentLengthRange.min, p.contentLengthRange.max))
+	}
+	if len(conditions) > 0 {
+		conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]"
+	}
+	retStr := "{"
+	retStr = retStr + expirationStr + ","
+	retStr = retStr + conditionsStr
+	retStr = retStr + "}"
+	return []byte(retStr)
+}
+
+// base64 - Produces the base64 encoding of the PostPolicy's marshaled JSON.
+func (p PostPolicy) base64() string {
+	return base64.StdEncoding.EncodeToString(p.marshalJSON())
+}
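In practice a PostPolicy is consumed through the client's PresignedPostPolicy call (part of api-presigned.go in this same vendor update), which signs the policy and returns the POST URL plus the form fields a browser upload form must carry. A usage sketch, not part of the patch; the endpoint, credentials, bucket and object names are placeholders:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	policy := minio.NewPostPolicy()
	// Each setter validates its input and returns an error;
	// checks are elided here for brevity.
	_ = policy.SetBucket("my-bucket")
	_ = policy.SetKey("uploads/photo.png")
	_ = policy.SetExpires(time.Now().UTC().Add(10 * time.Minute))
	_ = policy.SetContentType("image/png")
	_ = policy.SetContentLengthRange(1, 10*1024*1024)

	u, formData, err := client.PresignedPostPolicy(context.Background(), policy)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("POST to:", u)
	for k, v := range formData {
		fmt.Printf("  form field %s=%s\n", k, v)
	}
}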
diff --git a/vendor/github.com/minio/minio-go/v7/retry-continous.go b/vendor/github.com/minio/minio-go/v7/retry-continous.go
new file mode 100644
index 00000000..3d25883b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/retry-continous.go
@@ -0,0 +1,69 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import "time"
+
+// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
+func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
+	attemptCh := make(chan int)
+
+	// normalize jitter to the range [0, 1.0]
+	if jitter < NoJitter {
+		jitter = NoJitter
+	}
+	if jitter > MaxJitter {
+		jitter = MaxJitter
+	}
+
+	// computes the exponential backoff duration according to
+	// https://www.awsarchitectureblog.com/2015/03/backoff.html
+	exponentialBackoffWait := func(attempt int) time.Duration {
+		// 1<<uint(attempt) below could overflow, so limit the value of attempt
+		maxAttempt := 30
+		if attempt > maxAttempt {
+			attempt = maxAttempt
+		}
+		// sleep = random_between(0, min(cap, base * 2 ** attempt))
+		sleep := unit * time.Duration(1<<uint(attempt))
+		if sleep > cap {
+			sleep = cap
+		}
+		if jitter != NoJitter {
+			sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+		}
+		return sleep
+	}
+
+	go func() {
+		defer close(attemptCh)
+		var nextBackoff int
+		for {
+			select {
+			// Attempt starts.
+			case attemptCh <- nextBackoff:
+				nextBackoff++
+			case <-doneCh:
+				// Stop the routine.
+				return
+			}
+			time.Sleep(exponentialBackoffWait(nextBackoff))
+		}
+	}()
+	return attemptCh
+}
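To make the delay schedule concrete, here is a standalone sketch of the same capped exponential backoff with jitter (not part of the patch; the unit and cap values mirror DefaultRetryUnit and DefaultRetryCap from retry.go below):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff mirrors the formula above:
// sleep = min(cap, unit * 2^attempt), reduced by up to jitter*sleep.
func backoff(r *rand.Rand, unit, cap time.Duration, jitter float64, attempt int) time.Duration {
	if attempt > 30 {
		attempt = 30 // keep 1<<uint(attempt) from overflowing
	}
	sleep := unit * time.Duration(1<<uint(attempt))
	if sleep > cap {
		sleep = cap
	}
	sleep -= time.Duration(r.Float64() * float64(sleep) * jitter)
	return sleep
}

func main() {
	r := rand.New(rand.NewSource(42))
	// With unit=200ms and cap=1s the raw delays are 200ms, 400ms,
	// 800ms, then capped at 1s; jitter subtracts a random share.
	for attempt := 0; attempt < 5; attempt++ {
		fmt.Printf("attempt %d: wait %v\n", attempt,
			backoff(r, 200*time.Millisecond, time.Second, 0.5, attempt))
	}
}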
diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go
new file mode 100644
index 00000000..598af297
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/retry.go
@@ -0,0 +1,124 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"net/http"
+	"time"
+)
+
+// MaxRetry is the maximum number of retries before stopping.
+var MaxRetry = 10
+
+// MaxJitter will randomize over the full exponential backoff time
+const MaxJitter = 1.0
+
+// NoJitter disables the use of jitter for randomizing the exponential backoff time
+const NoJitter = 0.0
+
+// DefaultRetryUnit - default unit multiplicative per retry,
+// defaults to 200 * time.Millisecond.
+var DefaultRetryUnit = 200 * time.Millisecond
+
+// DefaultRetryCap - Each retry attempt never waits longer than
+// this maximum time duration.
+var DefaultRetryCap = time.Second
+
+// newRetryTimer creates a timer with exponentially increasing
+// delays until the maximum retry attempts are reached.
+func (c Client) newRetryTimer(ctx context.Context, maxRetry int, unit time.Duration, cap time.Duration, jitter float64) <-chan int {
+	attemptCh := make(chan int)
+
+	// computes the exponential backoff duration according to
+	// https://www.awsarchitectureblog.com/2015/03/backoff.html
+	exponentialBackoffWait := func(attempt int) time.Duration {
+		// normalize jitter to the range [0, 1.0]
+		if jitter < NoJitter {
+			jitter = NoJitter
+		}
+		if jitter > MaxJitter {
+			jitter = MaxJitter
+		}
+
+		// sleep = random_between(0, min(cap, base * 2 ** attempt))
+		sleep := unit * time.Duration(1<<uint(attempt))
+		if sleep > cap {
+			sleep = cap
+		}
+		if jitter != NoJitter {
+			sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+		}
+		return sleep
+	}
+
+	go func() {
+		defer close(attemptCh)
+		for i := 0; i < maxRetry; i++ {
+			select {
+			case attemptCh <- i + 1:
+			case <-ctx.Done():
+				return
+			}
+
+			select {
+			case <-time.After(exponentialBackoffWait(i)):
+			case <-ctx.Done():
+				return
+			}
+		}
+	}()
+	return attemptCh
+}
+
+// List of AWS S3 error codes which are retryable.
+var retryableS3Codes = map[string]struct{}{
+	"RequestError":          {},
+	"RequestTimeout":        {},
+	"Throttling":            {},
+	"ThrottlingException":   {},
+	"RequestLimitExceeded":  {},
+	"RequestThrottled":      {},
+	"InternalError":         {},
+	"ExpiredToken":          {},
+	"ExpiredTokenException": {},
+	"SlowDown":              {},
+	// Add more AWS S3 codes here.
+}
+
+// isS3CodeRetryable - is s3 error code retryable.
+func isS3CodeRetryable(s3Code string) (ok bool) {
+	_, ok = retryableS3Codes[s3Code]
+	return ok
+}
+
+// List of HTTP status codes which are retryable.
+var retryableHTTPStatusCodes = map[int]struct{}{
+	429:                            {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
+	http.StatusInternalServerError: {},
+	http.StatusBadGateway:          {},
+	http.StatusServiceUnavailable:  {},
+	http.StatusGatewayTimeout:      {},
+	// Add more HTTP status codes here.
+}
+
+// isHTTPStatusRetryable - is HTTP error code retryable.
+func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
+	_, ok = retryableHTTPStatusCodes[httpStatusCode]
+	return ok
+}
diff --git a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
new file mode 100644
index 00000000..9c8f02c8
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
@@ -0,0 +1,57 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+// awsS3EndpointMap Amazon S3 endpoint map.
+var awsS3EndpointMap = map[string]string{ + "us-east-1": "s3.dualstack.us-east-1.amazonaws.com", + "us-east-2": "s3.dualstack.us-east-2.amazonaws.com", + "us-west-2": "s3.dualstack.us-west-2.amazonaws.com", + "us-west-1": "s3.dualstack.us-west-1.amazonaws.com", + "ca-central-1": "s3.dualstack.ca-central-1.amazonaws.com", + "eu-west-1": "s3.dualstack.eu-west-1.amazonaws.com", + "eu-west-2": "s3.dualstack.eu-west-2.amazonaws.com", + "eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com", + "eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com", + "eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com", + "eu-south-1": "s3.dualstack.eu-south-1.amazonaws.com", + "ap-east-1": "s3.dualstack.ap-east-1.amazonaws.com", + "ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com", + "ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "s3.dualstack.ap-southeast-2.amazonaws.com", + "ap-northeast-1": "s3.dualstack.ap-northeast-1.amazonaws.com", + "ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com", + "ap-northeast-3": "s3.dualstack.ap-northeast-3.amazonaws.com", + "af-south-1": "s3.dualstack.af-south-1.amazonaws.com", + "me-south-1": "s3.dualstack.me-south-1.amazonaws.com", + "sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com", + "us-gov-west-1": "s3.dualstack.us-gov-west-1.amazonaws.com", + "us-gov-east-1": "s3.dualstack.us-gov-east-1.amazonaws.com", + "cn-north-1": "s3.dualstack.cn-north-1.amazonaws.com.cn", + "cn-northwest-1": "s3.dualstack.cn-northwest-1.amazonaws.com.cn", +} + +// getS3Endpoint get Amazon S3 endpoint based on the bucket location. +func getS3Endpoint(bucketLocation string) (s3Endpoint string) { + s3Endpoint, ok := awsS3EndpointMap[bucketLocation] + if !ok { + // Default to 's3.dualstack.us-east-1.amazonaws.com' endpoint. + s3Endpoint = "s3.dualstack.us-east-1.amazonaws.com" + } + return s3Endpoint +} diff --git a/vendor/github.com/minio/minio-go/v7/s3-error.go b/vendor/github.com/minio/minio-go/v7/s3-error.go new file mode 100644 index 00000000..f365157e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/s3-error.go @@ -0,0 +1,61 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package minio + +// Non exhaustive list of AWS S3 standard error responses - +// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +var s3ErrorResponseMap = map[string]string{ + "AccessDenied": "Access Denied.", + "BadDigest": "The Content-Md5 you specified did not match what we received.", + "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", + "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", + "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", + "InternalError": "We encountered an internal error, please try again.", + "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.", + "InvalidBucketName": "The specified bucket is not valid.", + "InvalidDigest": "The Content-Md5 you specified is not valid.", + "InvalidRange": "The requested range is not satisfiable", + "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.", + "MissingContentLength": "You must provide the Content-Length HTTP header.", + "MissingContentMD5": "Missing required header for this request: Content-Md5.", + "MissingRequestBodyError": "Request body is empty.", + "NoSuchBucket": "The specified bucket does not exist.", + "NoSuchBucketPolicy": "The bucket policy does not exist", + "NoSuchKey": "The specified key does not exist.", + "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", + "NotImplemented": "A header you provided implies functionality that is not implemented", + "PreconditionFailed": "At least one of the pre-conditions you specified did not hold", + "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.", + "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.", + "MethodNotAllowed": "The specified method is not allowed against this resource.", + "InvalidPart": "One or more of the specified parts could not be found.", + "InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.", + "InvalidObjectState": "The operation is not valid for the current state of the object.", + "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.", + "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.", + "BucketNotEmpty": "The bucket you tried to delete is not empty", + "AllAccessDisabled": "All access to this bucket has been disabled.", + "MalformedPolicy": "Policy has invalid resource.", + "MissingFields": "Missing fields in request.", + "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", + "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", + "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.", + "InvalidDuration": "Duration provided in the request is invalid.", + "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.", + // Add new API errors here. 
+}
diff --git a/vendor/github.com/minio/minio-go/v7/transport.go b/vendor/github.com/minio/minio-go/v7/transport.go
new file mode 100644
index 00000000..d5ad15b8
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/transport.go
@@ -0,0 +1,83 @@
+// +build go1.7 go1.8
+
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017-2018 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"os"
+	"time"
+)
+
+// mustGetSystemCertPool - returns the system cert pool, or an empty pool in case of error (or on Windows)
+func mustGetSystemCertPool() *x509.CertPool {
+	pool, err := x509.SystemCertPool()
+	if err != nil {
+		return x509.NewCertPool()
+	}
+	return pool
+}
+
+// DefaultTransport - this default transport is similar to
+// http.DefaultTransport but with the additional parameter
+// DisableCompression set to true, to avoid decompressing content
+// with 'gzip' encoding.
+var DefaultTransport = func(secure bool) (*http.Transport, error) {
+	tr := &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		DialContext: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).DialContext,
+		MaxIdleConns:          256,
+		MaxIdleConnsPerHost:   16,
+		ResponseHeaderTimeout: time.Minute,
+		IdleConnTimeout:       time.Minute,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 10 * time.Second,
+		// Set this value so that the underlying transport round-tripper
+		// doesn't try to auto decode the body of objects with
+		// content-encoding set to `gzip`.
+		//
+		// Refer:
+		//    https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
+		DisableCompression: true,
+	}
+
+	if secure {
+		tr.TLSClientConfig = &tls.Config{
+			// Can't use SSLv3 because of POODLE and BEAST
+			// Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
+			// Can't use TLSv1.1 because of RC4 cipher usage
+			MinVersion: tls.VersionTLS12,
+		}
+		if f := os.Getenv("SSL_CERT_FILE"); f != "" {
+			rootCAs := mustGetSystemCertPool()
+			data, err := ioutil.ReadFile(f)
+			if err == nil {
+				rootCAs.AppendCertsFromPEM(data)
+			}
+			tr.TLSClientConfig.RootCAs = rootCAs
+		}
+	}
+	return tr, nil
+}
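A usage sketch for this transport (not part of the patch): DefaultTransport is exported, so callers can build it, tweak a field, and hand it to the client via minio.Options. The endpoint and credentials below are placeholders.

package main

import (
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	tr, err := minio.DefaultTransport(true)
	if err != nil {
		log.Fatal(err)
	}
	// Tune a setting before handing the transport to the client.
	tr.MaxIdleConnsPerHost = 32

	client, err := minio.New("play.min.io", &minio.Options{
		Creds:     credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure:    true,
		Transport: tr,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = client // ready for bucket/object operations
}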
diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go
new file mode 100644
index 00000000..4bdf1a3c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/utils.go
@@ -0,0 +1,488 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/xml"
+	"fmt"
+	"hash"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	md5simd "github.com/minio/md5-simd"
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+	"github.com/minio/sha256-simd"
+)
+
+func trimEtag(etag string) string {
+	etag = strings.TrimPrefix(etag, "\"")
+	return strings.TrimSuffix(etag, "\"")
+}
+
+var expirationRegex = regexp.MustCompile(`expiry-date="(.*?)", rule-id="(.*?)"`)
+
+func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) {
+	if matches := expirationRegex.FindStringSubmatch(expiration); len(matches) == 3 {
+		expTime, err := time.Parse(http.TimeFormat, matches[1])
+		if err != nil {
+			return time.Time{}, ""
+		}
+		return expTime, matches[2]
+	}
+	return time.Time{}, ""
+}
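To show what this helper extracts, a standalone sketch of the same x-amz-expiration header parsing (not part of the patch; the header value and rule name are made up):

package main

import (
	"fmt"
	"net/http"
	"regexp"
	"time"
)

// Mirrors expirationRegex above: two submatches, the expiry date and the rule id.
var expirationRegex = regexp.MustCompile(`expiry-date="(.*?)", rule-id="(.*?)"`)

func main() {
	h := `expiry-date="Fri, 23 Dec 2022 00:00:00 GMT", rule-id="expire-logs"`
	if m := expirationRegex.FindStringSubmatch(h); len(m) == 3 {
		// The expiry date uses the standard HTTP time format.
		if t, err := time.Parse(http.TimeFormat, m[1]); err == nil {
			fmt.Println("expires:", t, "rule:", m[2])
		}
	}
}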
+// xmlDecoder provides the decoded value from XML.
+func xmlDecoder(body io.Reader, v interface{}) error {
+	d := xml.NewDecoder(body)
+	return d.Decode(v)
+}
+
+// sum256Hex calculates the sha256sum of an input byte array and returns it hex encoded.
+func sum256Hex(data []byte) string {
+	hash := newSHA256Hasher()
+	defer hash.Close()
+	hash.Write(data)
+	return hex.EncodeToString(hash.Sum(nil))
+}
+
+// sumMD5Base64 calculates the md5sum of an input byte array and returns it base64 encoded.
+func sumMD5Base64(data []byte) string {
+	hash := newMd5Hasher()
+	defer hash.Close()
+	hash.Write(data)
+	return base64.StdEncoding.EncodeToString(hash.Sum(nil))
+}
+
+// getEndpointURL - construct a new endpoint.
+func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
+	if strings.Contains(endpoint, ":") {
+		host, _, err := net.SplitHostPort(endpoint)
+		if err != nil {
+			return nil, err
+		}
+		if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
+			msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
+			return nil, errInvalidArgument(msg)
+		}
+	} else {
+		if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) {
+			msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
+			return nil, errInvalidArgument(msg)
+		}
+	}
+	// If secure is false, use 'http' scheme.
+	scheme := "https"
+	if !secure {
+		scheme = "http"
+	}
+
+	// Construct a secured endpoint URL.
+	endpointURLStr := scheme + "://" + endpoint
+	endpointURL, err := url.Parse(endpointURLStr)
+	if err != nil {
+		return nil, err
+	}
+
+	// Validate incoming endpoint URL.
+	if err := isValidEndpointURL(*endpointURL); err != nil {
+		return nil, err
+	}
+	return endpointURL, nil
+}
+
+// closeResponse closes a non-nil response with any response Body;
+// a convenient wrapper to drain any remaining data on the response body.
+//
+// Subsequently this allows golang http RoundTripper
+// to re-use the same connection for future requests.
+func closeResponse(resp *http.Response) {
+	// Callers should close resp.Body when done reading from it.
+	// If resp.Body is not closed, the Client's underlying RoundTripper
+	// (typically Transport) may not be able to re-use a persistent TCP
+	// connection to the server for a subsequent "keep-alive" request.
+	if resp != nil && resp.Body != nil {
+		// Drain any remaining Body and then close the connection.
+		// Without this, the closed connection could not be re-used
+		// for future requests.
+		// - http://stackoverflow.com/a/17961593/4465767
+		io.Copy(ioutil.Discard, resp.Body)
+		resp.Body.Close()
+	}
+}
+
+var (
+	// Hex encoded string of nil sha256sum bytes.
+	emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+
+	// Sentinel URL is the default url value which is invalid.
+	sentinelURL = url.URL{}
+)
+
+// Verify if input endpoint URL is valid.
+func isValidEndpointURL(endpointURL url.URL) error {
+	if endpointURL == sentinelURL {
+		return errInvalidArgument("Endpoint url cannot be empty.")
+	}
+	if endpointURL.Path != "/" && endpointURL.Path != "" {
+		return errInvalidArgument("Endpoint url cannot have fully qualified paths.")
+	}
+	if strings.Contains(endpointURL.Host, ".s3.amazonaws.com") {
+		if !s3utils.IsAmazonEndpoint(endpointURL) {
+			return errInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
+		}
+	}
+	if strings.Contains(endpointURL.Host, ".googleapis.com") {
+		if !s3utils.IsGoogleEndpoint(endpointURL) {
+			return errInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
+		}
+	}
+	return nil
+}
+
+// Verify if input expires value is valid.
+func isValidExpiry(expires time.Duration) error {
+	expireSeconds := int64(expires / time.Second)
+	if expireSeconds < 1 {
+		return errInvalidArgument("Expires cannot be less than 1 second.")
+	}
+	if expireSeconds > 604800 {
+		return errInvalidArgument("Expires cannot be greater than 7 days.")
+	}
+	return nil
+}
+
+// Extract only necessary metadata header key/values by
+// filtering them out with a list of custom header keys.
+func extractObjMetadata(header http.Header) http.Header {
+	preserveKeys := []string{
+		"Content-Type",
+		"Cache-Control",
+		"Content-Encoding",
+		"Content-Language",
+		"Content-Disposition",
+		"X-Amz-Storage-Class",
+		"X-Amz-Object-Lock-Mode",
+		"X-Amz-Object-Lock-Retain-Until-Date",
+		"X-Amz-Object-Lock-Legal-Hold",
+		"X-Amz-Website-Redirect-Location",
+		"X-Amz-Server-Side-Encryption",
+		"X-Amz-Tagging-Count",
+		"X-Amz-Meta-",
+		// Add new headers to be preserved.
+		// if you add new headers here, please extend
+		// PutObjectOptions{} to preserve them
+		// upon upload as well.
+	}
+	filteredHeader := make(http.Header)
+	for k, v := range header {
+		var found bool
+		for _, prefix := range preserveKeys {
+			if !strings.HasPrefix(k, prefix) {
+				continue
+			}
+			found = true
+			break
+		}
+		if found {
+			filteredHeader[k] = v
+		}
+	}
+	return filteredHeader
+}
+
+// ToObjectInfo converts http header values into ObjectInfo type,
+// extracts metadata and fills in all the necessary fields in ObjectInfo.
+func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectInfo, error) {
+	var err error
+	// Trim off the odd double quotes from ETag in the beginning and end.
+	etag := trimEtag(h.Get("ETag"))
+
+	// Parse content length if it exists
+	var size int64 = -1
+	contentLengthStr := h.Get("Content-Length")
+	if contentLengthStr != "" {
+		size, err = strconv.ParseInt(contentLengthStr, 10, 64)
+		if err != nil {
+			// Content-Length is not valid
+			return ObjectInfo{}, ErrorResponse{
+				Code:       "InternalError",
+				Message:    fmt.Sprintf("Content-Length is not an integer, failed with %v", err),
+				BucketName: bucketName,
+				Key:        objectName,
+				RequestID:  h.Get("x-amz-request-id"),
+				HostID:     h.Get("x-amz-id-2"),
+				Region:     h.Get("x-amz-bucket-region"),
+			}
+		}
+	}
+
+	// Parse Last-Modified as HTTP time format.
+ date, err := time.Parse(http.TimeFormat, h.Get("Last-Modified")) + if err != nil { + return ObjectInfo{}, ErrorResponse{ + Code: "InternalError", + Message: fmt.Sprintf("Last-Modified time format is invalid, failed with %v", err), + BucketName: bucketName, + Key: objectName, + RequestID: h.Get("x-amz-request-id"), + HostID: h.Get("x-amz-id-2"), + Region: h.Get("x-amz-bucket-region"), + } + } + + // Fetch content type if any present. + contentType := strings.TrimSpace(h.Get("Content-Type")) + if contentType == "" { + contentType = "application/octet-stream" + } + + expiryStr := h.Get("Expires") + var expiry time.Time + if expiryStr != "" { + expiry, _ = time.Parse(http.TimeFormat, expiryStr) + } + + metadata := extractObjMetadata(h) + userMetadata := make(map[string]string) + for k, v := range metadata { + if strings.HasPrefix(k, "X-Amz-Meta-") { + userMetadata[strings.TrimPrefix(k, "X-Amz-Meta-")] = v[0] + } + } + userTags := s3utils.TagDecode(h.Get(amzTaggingHeader)) + + var tagCount int + if count := h.Get(amzTaggingCount); count != "" { + tagCount, err = strconv.Atoi(count) + if err != nil { + return ObjectInfo{}, ErrorResponse{ + Code: "InternalError", + Message: fmt.Sprintf("x-amz-tagging-count is not an integer, failed with %v", err), + BucketName: bucketName, + Key: objectName, + RequestID: h.Get("x-amz-request-id"), + HostID: h.Get("x-amz-id-2"), + Region: h.Get("x-amz-bucket-region"), + } + } + } + + // extract lifecycle expiry date and rule ID + expTime, ruleID := amzExpirationToExpiryDateRuleID(h.Get(amzExpiration)) + + deleteMarker := h.Get(amzDeleteMarker) == "true" + + // Save object metadata info. + return ObjectInfo{ + ETag: etag, + Key: objectName, + Size: size, + LastModified: date, + ContentType: contentType, + Expires: expiry, + VersionID: h.Get(amzVersionID), + IsDeleteMarker: deleteMarker, + ReplicationStatus: h.Get(amzReplicationStatus), + Expiration: expTime, + ExpirationRuleID: ruleID, + // Extract only the relevant header keys describing the object. + // following function filters out a list of standard set of keys + // which are not part of object metadata. + Metadata: metadata, + UserMetadata: userMetadata, + UserTags: userTags, + UserTagCount: tagCount, + }, nil +} + +var readFull = func(r io.Reader, buf []byte) (n int, err error) { + // ReadFull reads exactly len(buf) bytes from r into buf. + // It returns the number of bytes copied and an error if + // fewer bytes were read. The error is EOF only if no bytes + // were read. If an EOF happens after reading some but not + // all the bytes, ReadFull returns ErrUnexpectedEOF. + // On return, n == len(buf) if and only if err == nil. + // If r returns an error having read at least len(buf) bytes, + // the error is dropped. + for n < len(buf) && err == nil { + var nn int + nn, err = r.Read(buf[n:]) + // Some spurious io.Reader's return + // io.ErrUnexpectedEOF when nn == 0 + // this behavior is undocumented + // so we are on purpose not using io.ReadFull + // implementation because this can lead + // to custom handling, to avoid that + // we simply modify the original io.ReadFull + // implementation to avoid this issue. 
+		// io.ErrUnexpectedEOF with nn == 0 really means io.EOF
+		if err == io.ErrUnexpectedEOF && nn == 0 {
+			err = io.EOF
+		}
+		n += nn
+	}
+	if n >= len(buf) {
+		err = nil
+	} else if n > 0 && err == io.EOF {
+		err = io.ErrUnexpectedEOF
+	}
+	return
+}
+
+// regCred matches credential string in HTTP header
+var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
+
+// regSign matches signature string in HTTP header
+var regSign = regexp.MustCompile("Signature=([0-9a-f]+)")
+
+// Redact out signature value from authorization string.
+func redactSignature(origAuth string) string {
+	if !strings.HasPrefix(origAuth, signV4Algorithm) {
+		// Set a temporary redacted auth
+		return "AWS **REDACTED**:**REDACTED**"
+	}
+
+	// Signature V4 authorization header.
+
+	// Strip out accessKeyID from:
+	// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
+	newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
+
+	// Strip out 256-bit signature from: Signature=<256-bit signature>
+	return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
+}
+
+// getDefaultLocation returns the location based on the input
+// URL `u`; if a region override is provided then the location
+// always defaults to regionOverride.
+//
+// If no other cases match then the location is set to `us-east-1`
+// as a last resort.
+func getDefaultLocation(u url.URL, regionOverride string) (location string) {
+	if regionOverride != "" {
+		return regionOverride
+	}
+	region := s3utils.GetRegionFromURL(u)
+	if region == "" {
+		region = "us-east-1"
+	}
+	return region
+}
+
+var supportedHeaders = []string{
+	"content-type",
+	"cache-control",
+	"content-encoding",
+	"content-disposition",
+	"content-language",
+	"x-amz-website-redirect-location",
+	"x-amz-object-lock-mode",
+	"x-amz-metadata-directive",
+	"x-amz-object-lock-retain-until-date",
+	"expires",
+	"x-amz-replication-status",
+	// Add more supported headers here.
+}
+
+// isStorageClassHeader returns true if the header is a supported storage class header
+func isStorageClassHeader(headerKey string) bool {
+	return strings.EqualFold(amzStorageClass, headerKey)
+}
+
+// isStandardHeader returns true if header is a supported header and not a custom header
+func isStandardHeader(headerKey string) bool {
+	key := strings.ToLower(headerKey)
+	for _, header := range supportedHeaders {
+		if strings.ToLower(header) == key {
+			return true
+		}
+	}
+	return false
+}
+
+// sseHeaders is list of server side encryption headers
+var sseHeaders = []string{
+	"x-amz-server-side-encryption",
+	"x-amz-server-side-encryption-aws-kms-key-id",
+	"x-amz-server-side-encryption-context",
+	"x-amz-server-side-encryption-customer-algorithm",
+	"x-amz-server-side-encryption-customer-key",
+	"x-amz-server-side-encryption-customer-key-MD5",
+}
+
+// isSSEHeader returns true if header is a server side encryption header.
+func isSSEHeader(headerKey string) bool {
+	key := strings.ToLower(headerKey)
+	for _, h := range sseHeaders {
+		if strings.ToLower(h) == key {
+			return true
+		}
+	}
+	return false
+}
+
+// isAmzHeader returns true if header is a x-amz-meta-*, x-amz-grant-*, x-amz-acl or SSE header.
+func isAmzHeader(headerKey string) bool { + key := strings.ToLower(headerKey) + + return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) +} + +var md5Pool = sync.Pool{New: func() interface{} { return md5.New() }} +var sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }} + +func newMd5Hasher() md5simd.Hasher { + return hashWrapper{Hash: md5Pool.New().(hash.Hash), isMD5: true} +} + +func newSHA256Hasher() md5simd.Hasher { + return hashWrapper{Hash: sha256Pool.New().(hash.Hash), isSHA256: true} +} + +// hashWrapper implements the md5simd.Hasher interface. +type hashWrapper struct { + hash.Hash + isMD5 bool + isSHA256 bool +} + +// Close will put the hasher back into the pool. +func (m hashWrapper) Close() { + if m.isMD5 && m.Hash != nil { + m.Reset() + md5Pool.Put(m.Hash) + } + if m.isSHA256 && m.Hash != nil { + m.Reset() + sha256Pool.Put(m.Hash) + } + m.Hash = nil +} -- cgit v1.2.3
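As a closing note on the md5Pool/sha256Pool pattern that ends utils.go above: the pools let hot request paths reuse hash states instead of reallocating them, with Close() resetting each hasher before returning it. A minimal standalone sketch of the same idea (not part of the patch; it uses only the stdlib rather than the md5simd wrapper):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"hash"
	"sync"
)

// sha256Pool mirrors the pool used above: hashers are reused across
// calls instead of being reallocated.
var sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }}

func sum256Hex(data []byte) string {
	h := sha256Pool.Get().(hash.Hash)
	defer func() {
		h.Reset() // reset state before returning the hasher to the pool
		sha256Pool.Put(h)
	}()
	h.Write(data)
	return hex.EncodeToString(h.Sum(nil))
}

func main() {
	// The empty input hashes to the emptySHA256Hex constant noted in utils.go.
	fmt.Println(sum256Hex(nil))
}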