diff options
author | Wim <wim@42.be> | 2021-12-12 00:05:15 +0100 |
---|---|---|
committer | GitHub <noreply@github.com> | 2021-12-12 00:05:15 +0100 |
commit | 3893a035be347a7687a41d2054dd1b274d3a0504 (patch) | |
tree | dfe4a3bf72a0a6356e51bd8fc2e88e9a26e52331 /vendor/github.com | |
parent | 658bdd9faa835660ae407331732e9d93d8f6443b (diff) | |
download | matterbridge-msglm-3893a035be347a7687a41d2054dd1b274d3a0504.tar.gz matterbridge-msglm-3893a035be347a7687a41d2054dd1b274d3a0504.tar.bz2 matterbridge-msglm-3893a035be347a7687a41d2054dd1b274d3a0504.zip |
Update dependencies/vendor (#1659)
Diffstat (limited to 'vendor/github.com')
115 files changed, 3223 insertions, 812 deletions
diff --git a/vendor/github.com/Rhymen/go-whatsapp/README.md b/vendor/github.com/Rhymen/go-whatsapp/README.md index 7f12dd72..ea66f5bb 100644 --- a/vendor/github.com/Rhymen/go-whatsapp/README.md +++ b/vendor/github.com/Rhymen/go-whatsapp/README.md @@ -70,7 +70,7 @@ func (myHandler) HandleContactMessage(message whatsapp.ContactMessage) { fmt.Println(message) } -func (myHandler) HandleBatteryMessage(msg whatsapp.BatteryMessage) { +func (myHandler) HandleBatteryMessage(message whatsapp.BatteryMessage) { fmt.Println(message) } diff --git a/vendor/github.com/Rhymen/go-whatsapp/session.go b/vendor/github.com/Rhymen/go-whatsapp/session.go index 63a6905c..215fb599 100644 --- a/vendor/github.com/Rhymen/go-whatsapp/session.go +++ b/vendor/github.com/Rhymen/go-whatsapp/session.go @@ -526,5 +526,7 @@ func (wac *Conn) Logout() error { return fmt.Errorf("error writing logout: %v\n", err) } + wac.loggedIn = false + return nil } diff --git a/vendor/github.com/SevereCloud/vksdk/v2/.golangci.yml b/vendor/github.com/SevereCloud/vksdk/v2/.golangci.yml index 95108696..cb8abdca 100644 --- a/vendor/github.com/SevereCloud/vksdk/v2/.golangci.yml +++ b/vendor/github.com/SevereCloud/vksdk/v2/.golangci.yml @@ -48,6 +48,11 @@ linters: - nilerr - revive - wastedassign + - bidichk + - contextcheck + - ireturn + - nilnil + - tenv # - wrapcheck # TODO: v3 Fix # - testpackage # TODO: Fix testpackage @@ -75,6 +80,8 @@ linters: # - cyclop # - promlinter # - tagliatelle +# - errname +# - varnamelen # depricated # - maligned diff --git a/vendor/github.com/SevereCloud/vksdk/v2/.travis.yml b/vendor/github.com/SevereCloud/vksdk/v2/.travis.yml deleted file mode 100644 index 47727020..00000000 --- a/vendor/github.com/SevereCloud/vksdk/v2/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -language: go - -cache: - directories: - - $HOME/.cache/go-build - - $HOME/gopath/pkg/mod - -go: - - 1.x - -before_script: - - git fetch --depth=1 origin +refs/tags/*:refs/tags/* - - git describe --tags $(git rev-list --tags --max-count=1) --always - -script: - - go test -v -race -coverprofile=coverage.txt -covermode=atomic -p=1 ./... - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/SevereCloud/vksdk/v2/CONTRIBUTING.md b/vendor/github.com/SevereCloud/vksdk/v2/CONTRIBUTING.md index e78a83ea..7606cad8 100644 --- a/vendor/github.com/SevereCloud/vksdk/v2/CONTRIBUTING.md +++ b/vendor/github.com/SevereCloud/vksdk/v2/CONTRIBUTING.md @@ -39,6 +39,7 @@ golangci-lint run # CLIENT_SECRET="" # USER_TOKEN="" # WIDGET_TOKEN="" +# MARUSIA_TOKEN="" # CLIENT_ID="123456" # GROUP_ID="123456" # ACCOUNT_ID="123456" @@ -56,6 +57,7 @@ go test ./... 
"go.testEnvVars": { "SERVICE_TOKEN": "", "WIDGET_TOKEN": "", + "MARUSIA_TOKEN": "", "GROUP_TOKEN": "", "CLIENT_SECRET": "", "USER_TOKEN": "", diff --git a/vendor/github.com/SevereCloud/vksdk/v2/README.md b/vendor/github.com/SevereCloud/vksdk/v2/README.md index c3ddd2d3..60e89dd4 100644 --- a/vendor/github.com/SevereCloud/vksdk/v2/README.md +++ b/vendor/github.com/SevereCloud/vksdk/v2/README.md @@ -1,125 +1,124 @@ -# VK SDK for Golang - -[![Build Status](https://travis-ci.com/SevereCloud/vksdk.svg?branch=master)](https://travis-ci.com/SevereCloud/vksdk) -[![PkgGoDev](https://pkg.go.dev/badge/github.com/SevereCloud/vksdk/v2/v2)](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2?tab=subdirectories) -[![VK Developers](https://img.shields.io/badge/developers-%234a76a8.svg?logo=VK&logoColor=white)](https://vk.com/dev/) -[![codecov](https://codecov.io/gh/SevereCloud/vksdk/branch/master/graph/badge.svg)](https://codecov.io/gh/SevereCloud/vksdk) -[![VK chat](https://img.shields.io/badge/VK%20chat-%234a76a8.svg?logo=VK&logoColor=white)](https://vk.me/join/AJQ1d6Or8Q00Y_CSOESfbqGt) -[![release](https://img.shields.io/github/v/tag/SevereCloud/vksdk?label=release)](https://github.com/SevereCloud/vksdk/releases) -[![license](https://img.shields.io/github/license/SevereCloud/vksdk.svg?maxAge=2592000)](https://github.com/SevereCloud/vksdk/blob/master/LICENSE) - -**VK SDK for Golang** ready implementation of the main VK API functions for Go. - -[Russian documentation](https://github.com/SevereCloud/vksdk/wiki) - -## Features - -Version API 5.131. - -- [API](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/api) - - 400+ methods - - Ability to change the request handler - - Ability to modify HTTP client - - Request Limiter - - Token pool -- [Callback API](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/callback) - - Tracking tool for users activity in your VK communities - - Supports all events - - Auto setting callback -- [Bots Long Poll API](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/longpoll-bot) - - Allows you to work with community events in real time - - Supports all events - - Ability to modify HTTP client -- [User Long Poll API](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/longpoll-user) - - Allows you to work with user events in real time - - Ability to modify HTTP client -- [Streaming API](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/streaming) - - Receiving public data from VK by specified keywords - - Ability to modify HTTP client -- [FOAF](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/foaf) - - Machine-readable ontology describing persons - - Works with users and groups - - The only place to get page creation date -- [Games](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/games) - - Checking launch parameters - - Intermediate http handler -- [VK Mini Apps](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/vkapps) - - Checking launch parameters - - Intermediate http handler -- [Payments API](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/payments) - - Processes payment notifications -- [Marusia Skills](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/marusia) - - For creating Marusia Skills - - Support SSML - -## Install - -```bash -# go mod init mymodulename -go get github.com/SevereCloud/vksdk/v2@latest -``` - -## Use by - -- [Joe](https://github.com/go-joe/joe) adapter: <https://github.com/tdakkota/joe-vk-adapter> -- [Logrus](https://github.com/sirupsen/logrus) hook: <https://github.com/SevereCloud/vkrus> - -### Example - -```go -package main - -import 
( - "context" - "log" - - "github.com/SevereCloud/vksdk/v2/api" - "github.com/SevereCloud/vksdk/v2/api/params" - "github.com/SevereCloud/vksdk/v2/events" - "github.com/SevereCloud/vksdk/v2/longpoll-bot" -) - -func main() { - token := "<TOKEN>" // use os.Getenv("TOKEN") - vk := api.NewVK(token) - - // get information about the group - group, err := vk.GroupsGetByID(nil) - if err != nil { - log.Fatal(err) - } - - // Initializing Long Poll - lp, err := longpoll.NewLongPoll(vk, group[0].ID) - if err != nil { - log.Fatal(err) - } - - // New message event - lp.MessageNew(func(_ context.Context, obj events.MessageNewObject) { - log.Printf("%d: %s", obj.Message.PeerID, obj.Message.Text) - - if obj.Message.Text == "ping" { - b := params.NewMessagesSendBuilder() - b.Message("pong") - b.RandomID(0) - b.PeerID(obj.Message.PeerID) - - _, err := vk.MessagesSend(b.Params) - if err != nil { - log.Fatal(err) - } - } - }) - - // Run Bots Long Poll - log.Println("Start Long Poll") - if err := lp.Run(); err != nil { - log.Fatal(err) - } -} -``` - -## LICENSE - -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2FSevereCloud%2Fvksdk.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2FSevereCloud%2Fvksdk?ref=badge_large) +# VK SDK for Golang
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/SevereCloud/vksdk/v2/v2)](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2?tab=subdirectories)
+[![VK Developers](https://img.shields.io/badge/developers-%234a76a8.svg?logo=VK&logoColor=white)](https://vk.com/dev/)
+[![codecov](https://codecov.io/gh/SevereCloud/vksdk/branch/master/graph/badge.svg)](https://codecov.io/gh/SevereCloud/vksdk)
+[![VK chat](https://img.shields.io/badge/VK%20chat-%234a76a8.svg?logo=VK&logoColor=white)](https://vk.me/join/AJQ1d6Or8Q00Y_CSOESfbqGt)
+[![release](https://img.shields.io/github/v/tag/SevereCloud/vksdk?label=release)](https://github.com/SevereCloud/vksdk/releases)
+[![license](https://img.shields.io/github/license/SevereCloud/vksdk.svg?maxAge=2592000)](https://github.com/SevereCloud/vksdk/blob/master/LICENSE)
+
+**VK SDK for Golang** ready implementation of the main VK API functions for Go.
+
+[Russian documentation](https://github.com/SevereCloud/vksdk/wiki)
+
+## Features
+
+Version API 5.131.
+
+- [API](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/api)
+ - 400+ methods
+ - Ability to change the request handler
+ - Ability to modify HTTP client
+ - Request Limiter
+ - Token pool
+- [Callback API](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/callback)
+ - Tracking tool for users activity in your VK communities
+ - Supports all events
+ - Auto setting callback
+- [Bots Long Poll API](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/longpoll-bot)
+ - Allows you to work with community events in real time
+ - Supports all events
+ - Ability to modify HTTP client
+- [User Long Poll API](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/longpoll-user)
+ - Allows you to work with user events in real time
+ - Ability to modify HTTP client
+- [Streaming API](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/streaming)
+ - Receiving public data from VK by specified keywords
+ - Ability to modify HTTP client
+- [FOAF](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/foaf)
+ - Machine-readable ontology describing persons
+ - Works with users and groups
+ - The only place to get page creation date
+- [Games](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/games)
+ - Checking launch parameters
+ - Intermediate http handler
+- [VK Mini Apps](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/vkapps)
+ - Checking launch parameters
+ - Intermediate http handler
+- [Payments API](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/payments)
+ - Processes payment notifications
+- [Marusia Skills](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2/marusia)
+ - For creating Marusia Skills
+ - Support SSML
+
+## Install
+
+```bash
+# go mod init mymodulename
+go get github.com/SevereCloud/vksdk/v2@latest
+```
+
+## Use by
+
+- [Joe](https://github.com/go-joe/joe) adapter: <https://github.com/tdakkota/joe-vk-adapter>
+- [Logrus](https://github.com/sirupsen/logrus) hook: <https://github.com/SevereCloud/vkrus>
+
+### Example
+
+```go
+package main
+
+import (
+ "context"
+ "log"
+
+ "github.com/SevereCloud/vksdk/v2/api"
+ "github.com/SevereCloud/vksdk/v2/api/params"
+ "github.com/SevereCloud/vksdk/v2/events"
+ "github.com/SevereCloud/vksdk/v2/longpoll-bot"
+)
+
+func main() {
+ token := "<TOKEN>" // use os.Getenv("TOKEN")
+ vk := api.NewVK(token)
+
+ // get information about the group
+ group, err := vk.GroupsGetByID(nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Initializing Long Poll
+ lp, err := longpoll.NewLongPoll(vk, group[0].ID)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // New message event
+ lp.MessageNew(func(_ context.Context, obj events.MessageNewObject) {
+ log.Printf("%d: %s", obj.Message.PeerID, obj.Message.Text)
+
+ if obj.Message.Text == "ping" {
+ b := params.NewMessagesSendBuilder()
+ b.Message("pong")
+ b.RandomID(0)
+ b.PeerID(obj.Message.PeerID)
+
+ _, err := vk.MessagesSend(b.Params)
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+ })
+
+ // Run Bots Long Poll
+ log.Println("Start Long Poll")
+ if err := lp.Run(); err != nil {
+ log.Fatal(err)
+ }
+}
+```
+
+## LICENSE
+
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2FSevereCloud%2Fvksdk.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2FSevereCloud%2Fvksdk?ref=badge_large)
diff --git a/vendor/github.com/SevereCloud/vksdk/v2/api/errors.go b/vendor/github.com/SevereCloud/vksdk/v2/api/errors.go index e9ea1310..03032aa6 100644 --- a/vendor/github.com/SevereCloud/vksdk/v2/api/errors.go +++ b/vendor/github.com/SevereCloud/vksdk/v2/api/errors.go @@ -159,6 +159,9 @@ const ( ErrRateLimit ErrorType = 29 ErrPrivateProfile ErrorType = 30 // This profile is private + // Client version deprecated. + ErrClientVersionDeprecated ErrorType = 34 + // Method execution was interrupted due to timeout. ErrExecutionTimeout ErrorType = 36 @@ -177,6 +180,9 @@ const ( // Additional signup required. ErrAdditionalSignupRequired ErrorType = 41 + // IP is not allowed. + ErrIPNotAllowed ErrorType = 42 + // One of the parameters specified was missing or invalid // // Check the required parameters list and their format on a method @@ -586,6 +592,12 @@ const ( // Can't send message, reply timed out. ErrMessagesReplyTimedOut ErrorType = 950 + // You can't access donut chat without subscription. + ErrMessagesAccessDonutChat ErrorType = 962 + + // This user can't be added to the work chat, as they aren't an employe. + ErrMessagesAccessWorkChat ErrorType = 967 + // Invalid phone number. ErrParamPhone ErrorType = 1000 @@ -598,6 +610,12 @@ const ( // Processing.. Try later. ErrAuthDelay ErrorType = 1112 + // Anonymous token has expired. + ErrAnonymousTokenExpired ErrorType = 1114 + + // Anonymous token is invalid. + ErrAnonymousTokenInvalid ErrorType = 1116 + // Invalid document id. ErrParamDocID ErrorType = 1150 @@ -724,6 +742,9 @@ const ( // Market was already disabled in this group. ErrMarketAlreadyDisabled ErrorType = 1432 + // Main album can not be hidden. + ErrMainAlbumCantHidden ErrorType = 1446 + // Story has already expired. ErrStoryExpired ErrorType = 1600 @@ -783,6 +804,33 @@ const ( // Can't set AliExpress tag to this type of object. ErrAliExpressTag ErrorType = 3800 + + // Invalid upload response. + ErrInvalidUploadResponse ErrorType = 5701 + + // Invalid upload hash. + ErrInvalidUploadHash ErrorType = 5702 + + // Invalid upload user. + ErrInvalidUploadUser ErrorType = 5703 + + // Invalid upload group. + ErrInvalidUploadGroup ErrorType = 5704 + + // Invalid crop data. + ErrInvalidCropData ErrorType = 5705 + + // To small avatar. + ErrToSmallAvatar ErrorType = 5706 + + // Photo not found. + ErrPhotoNotFound ErrorType = 5708 + + // Invalid Photo. + ErrInvalidPhoto ErrorType = 5709 + + // Invalid hash. + ErrInvalidHash ErrorType = 5710 ) // ErrorSubtype is the subtype of an error. diff --git a/vendor/github.com/SevereCloud/vksdk/v2/api/execute.go b/vendor/github.com/SevereCloud/vksdk/v2/api/execute.go index 5bee205a..cc52cd0d 100644 --- a/vendor/github.com/SevereCloud/vksdk/v2/api/execute.go +++ b/vendor/github.com/SevereCloud/vksdk/v2/api/execute.go @@ -22,6 +22,9 @@ func (vk *VK) ExecuteWithArgs(code string, params Params, obj interface{}) error } resp, err := vk.Handler("execute", params, reqParams) + if err != nil { + return err + } jsonErr := json.Unmarshal(resp.Response, &obj) if jsonErr != nil { diff --git a/vendor/github.com/SevereCloud/vksdk/v2/api/market.go b/vendor/github.com/SevereCloud/vksdk/v2/api/market.go index f78a068d..0bb35687 100644 --- a/vendor/github.com/SevereCloud/vksdk/v2/api/market.go +++ b/vendor/github.com/SevereCloud/vksdk/v2/api/market.go @@ -318,3 +318,19 @@ func (vk *VK) MarketSearch(params Params) (response MarketSearchResponse, err er err = vk.RequestUnmarshal("market.search", &response, params) return } + +// MarketSearchItemsResponse struct. 
+type MarketSearchItemsResponse struct { + Count int `json:"count"` + ViewType int `json:"view_type"` + Items []object.MarketMarketItem `json:"items"` + Groups []object.GroupsGroup `json:"groups,omitempty"` +} + +// MarketSearchItems method. +// +// https://vk.com/dev/market.searchItems +func (vk *VK) MarketSearchItems(params Params) (response MarketSearchItemsResponse, err error) { + err = vk.RequestUnmarshal("market.searchItems", &response, params) + return +} diff --git a/vendor/github.com/SevereCloud/vksdk/v2/api/marusia.go b/vendor/github.com/SevereCloud/vksdk/v2/api/marusia.go new file mode 100644 index 00000000..729dbc80 --- /dev/null +++ b/vendor/github.com/SevereCloud/vksdk/v2/api/marusia.go @@ -0,0 +1,103 @@ +package api // import "github.com/SevereCloud/vksdk/v2/api" + +import ( + "github.com/SevereCloud/vksdk/v2/object" +) + +// MarusiaGetPictureUploadLinkResponse struct. +type MarusiaGetPictureUploadLinkResponse struct { + PictureUploadLink string `json:"picture_upload_link"` // Link +} + +// MarusiaGetPictureUploadLink method. +// +// https://vk.com/dev/marusia_skill_docs10 +func (vk *VK) MarusiaGetPictureUploadLink(params Params) (response MarusiaGetPictureUploadLinkResponse, err error) { + err = vk.RequestUnmarshal("marusia.getPictureUploadLink", &response, params) + return +} + +// MarusiaSavePictureResponse struct. +type MarusiaSavePictureResponse struct { + AppID int `json:"app_id"` + PhotoID int `json:"photo_id"` +} + +// MarusiaSavePicture method. +// +// https://vk.com/dev/marusia_skill_docs10 +func (vk *VK) MarusiaSavePicture(params Params) (response MarusiaSavePictureResponse, err error) { + err = vk.RequestUnmarshal("marusia.savePicture", &response, params) + return +} + +// MarusiaGetPicturesResponse struct. +type MarusiaGetPicturesResponse struct { + Count int `json:"count"` + Items []object.MarusiaPicture `json:"items"` +} + +// MarusiaGetPictures method. +// +// https://vk.com/dev/marusia_skill_docs10 +func (vk *VK) MarusiaGetPictures(params Params) (response MarusiaGetPicturesResponse, err error) { + err = vk.RequestUnmarshal("marusia.getPictures", &response, params) + return +} + +// MarusiaDeletePicture delete picture. +// +// https://vk.com/dev/marusia_skill_docs10 +func (vk *VK) MarusiaDeletePicture(params Params) (response int, err error) { + err = vk.RequestUnmarshal("marusia.deletePicture", &response, params) + return +} + +// MarusiaGetAudioUploadLinkResponse struct. +type MarusiaGetAudioUploadLinkResponse struct { + AudioUploadLink string `json:"audio_upload_link"` // Link +} + +// MarusiaGetAudioUploadLink method. +// +// https://vk.com/dev/marusia_skill_docs10 +func (vk *VK) MarusiaGetAudioUploadLink(params Params) (response MarusiaGetAudioUploadLinkResponse, err error) { + err = vk.RequestUnmarshal("marusia.getAudioUploadLink", &response, params) + return +} + +// MarusiaCreateAudioResponse struct. +type MarusiaCreateAudioResponse struct { + ID int `json:"id"` + Title string `json:"title"` +} + +// MarusiaCreateAudio method. +// +// https://vk.com/dev/marusia_skill_docs10 +func (vk *VK) MarusiaCreateAudio(params Params) (response MarusiaCreateAudioResponse, err error) { + err = vk.RequestUnmarshal("marusia.createAudio", &response, params) + return +} + +// MarusiaGetAudiosResponse struct. +type MarusiaGetAudiosResponse struct { + Count int `json:"count"` + Audios []object.MarusiaAudio `json:"audios"` +} + +// MarusiaGetAudios method. 
+// +// https://vk.com/dev/marusia_skill_docs10 +func (vk *VK) MarusiaGetAudios(params Params) (response MarusiaGetAudiosResponse, err error) { + err = vk.RequestUnmarshal("marusia.getAudios", &response, params) + return +} + +// MarusiaDeleteAudio delete audio. +// +// https://vk.com/dev/marusia_skill_docs10 +func (vk *VK) MarusiaDeleteAudio(params Params) (response int, err error) { + err = vk.RequestUnmarshal("marusia.deleteAudio", &response, params) + return +} diff --git a/vendor/github.com/SevereCloud/vksdk/v2/api/upload.go b/vendor/github.com/SevereCloud/vksdk/v2/api/upload.go index 2c947c5d..59f65460 100644 --- a/vendor/github.com/SevereCloud/vksdk/v2/api/upload.go +++ b/vendor/github.com/SevereCloud/vksdk/v2/api/upload.go @@ -959,3 +959,57 @@ func (vk *VK) UploadGroupImage(imageType string, file io.Reader) (response objec return } + +// UploadMarusiaPicture uploading picture. +// +// Limits: height not more than 600 px, +// aspect ratio of at least 2:1. +func (vk *VK) UploadMarusiaPicture(file io.Reader) (response MarusiaSavePictureResponse, err error) { + uploadServer, err := vk.MarusiaGetPictureUploadLink(nil) + if err != nil { + return + } + + bodyContent, err := vk.UploadFile(uploadServer.PictureUploadLink, file, "photo", "photo.jpg") + if err != nil { + return + } + + var handler object.MarusiaPictureUploadResponse + + err = json.Unmarshal(bodyContent, &handler) + if err != nil { + return + } + + photo, _ := json.Marshal(handler.Photo) + + response, err = vk.MarusiaSavePicture(Params{ + "server": handler.Server, + "photo": string(photo), + "hash": handler.Hash, + }) + + return +} + +// UploadMarusiaAudio uploading audio. +// +// https://vk.com/dev/marusia_skill_docs10 +func (vk *VK) UploadMarusiaAudio(file io.Reader) (response MarusiaCreateAudioResponse, err error) { + uploadServer, err := vk.MarusiaGetAudioUploadLink(nil) + if err != nil { + return + } + + bodyContent, err := vk.UploadFile(uploadServer.AudioUploadLink, file, "file", "audio.mp3") + if err != nil { + return + } + + response, err = vk.MarusiaCreateAudio(Params{ + "audio_meta": string(bodyContent), + }) + + return +} diff --git a/vendor/github.com/SevereCloud/vksdk/v2/doc.go b/vendor/github.com/SevereCloud/vksdk/v2/doc.go index 1be7800e..61e19efe 100644 --- a/vendor/github.com/SevereCloud/vksdk/v2/doc.go +++ b/vendor/github.com/SevereCloud/vksdk/v2/doc.go @@ -7,6 +7,6 @@ package vksdk // Module constants. const ( - Version = "2.10.0" + Version = "2.11.0" API = "5.131" ) diff --git a/vendor/github.com/SevereCloud/vksdk/v2/object/market.go b/vendor/github.com/SevereCloud/vksdk/v2/object/market.go index a74f8b83..a1d75213 100644 --- a/vendor/github.com/SevereCloud/vksdk/v2/object/market.go +++ b/vendor/github.com/SevereCloud/vksdk/v2/object/market.go @@ -28,6 +28,8 @@ type MarketMarketAlbum struct { Photo PhotosPhoto `json:"photo"` Title string `json:"title"` // Market album title UpdatedTime int `json:"updated_time"` // Date when album has been updated last time in Unixtime + IsMain BaseBoolInt `json:"is_main"` + IsHidden BaseBoolInt `json:"is_hidden"` } // ToAttachment return attachment format. diff --git a/vendor/github.com/SevereCloud/vksdk/v2/object/marusia.go b/vendor/github.com/SevereCloud/vksdk/v2/object/marusia.go new file mode 100644 index 00000000..a6671d4d --- /dev/null +++ b/vendor/github.com/SevereCloud/vksdk/v2/object/marusia.go @@ -0,0 +1,52 @@ +package object // import "github.com/SevereCloud/vksdk/v2/object" + +import ( + "encoding/json" +) + +// MarusiaPicture struct. 
+type MarusiaPicture struct { + ID int `json:"id"` + OwnerID int `json:"owner_id"` +} + +// MarusiaPictureUploadResponse struct. +type MarusiaPictureUploadResponse struct { + Hash string `json:"hash"` // Uploading hash + Photo json.RawMessage `json:"photo"` // Uploaded photo data + Server int `json:"server"` // Upload server number + AID int `json:"aid"` + MessageCode int `json:"message_code"` +} + +// MarusiaAudio struct. +type MarusiaAudio struct { + ID int `json:"id"` + Title string `json:"title"` + OwnerID int `json:"owner_id"` +} + +// MarusiaAudioUploadResponse struct. +type MarusiaAudioUploadResponse struct { + Sha string `json:"sha"` + Secret string `json:"secret"` + Meta MarusiaAudioMeta `json:"meta"` + Hash string `json:"hash"` + Server string `json:"server"` + UserID int `json:"user_id"` + RequestID string `json:"request_id"` +} + +// MarusiaAudioMeta struct. +type MarusiaAudioMeta struct { + Album string `json:"album"` + Artist string `json:"artist"` + Bitrate string `json:"bitrate"` + Duration string `json:"duration"` + Genre string `json:"genre"` + Kad string `json:"kad"` + Md5 string `json:"md5"` + Md5DataSize string `json:"md5_data_size"` + Samplerate string `json:"samplerate"` + Title string `json:"title"` +} diff --git a/vendor/github.com/SevereCloud/vksdk/v2/object/messages.go b/vendor/github.com/SevereCloud/vksdk/v2/object/messages.go index c6fcf5d8..4c553486 100644 --- a/vendor/github.com/SevereCloud/vksdk/v2/object/messages.go +++ b/vendor/github.com/SevereCloud/vksdk/v2/object/messages.go @@ -375,17 +375,17 @@ type MessagesTemplateElement struct { // MessagesTemplateElementCarousel struct. type MessagesTemplateElementCarousel struct { - Title string `json:"title"` - Action MessagesTemplateElementCarouselAction `json:"action"` - Description string `json:"description"` - Photo PhotosPhoto `json:"photo"` - Buttons []MessagesKeyboardButton `json:"buttons"` + Title string `json:"title,omitempty"` + Action MessagesTemplateElementCarouselAction `json:"action,omitempty"` + Description string `json:"description,omitempty"` + Photo *PhotosPhoto `json:"photo,omitempty"` + Buttons []MessagesKeyboardButton `json:"buttons,omitempty"` } // MessagesTemplateElementCarouselAction struct. type MessagesTemplateElementCarouselAction struct { Type string `json:"type"` - Link string `json:"link"` + Link string `json:"link,omitempty"` } // MessageContentSourceMessage ... 
@@ -443,6 +443,7 @@ type MessagesChat struct { AdminID int `json:"admin_id"` // Chat creator ID ID int `json:"id"` // Chat ID IsDefaultPhoto BaseBoolInt `json:"is_default_photo"` + IsGroupChannel BaseBoolInt `json:"is_group_channel"` Photo100 string `json:"photo_100"` // URL of the preview image with 100 px in width Photo200 string `json:"photo_200"` // URL of the preview image with 200 px in width Photo50 string `json:"photo_50"` // URL of the preview image with 50 px in width diff --git a/vendor/github.com/SevereCloud/vksdk/v2/object/stories.go b/vendor/github.com/SevereCloud/vksdk/v2/object/stories.go index 248fd8c0..c04ab653 100644 --- a/vendor/github.com/SevereCloud/vksdk/v2/object/stories.go +++ b/vendor/github.com/SevereCloud/vksdk/v2/object/stories.go @@ -251,8 +251,10 @@ type StoriesClickableSticker struct { // nolint: maligned StickerID int `json:"sticker_id,omitempty"` StickerPackID int `json:"sticker_pack_id,omitempty"` - // type=place + // type=place or geo PlaceID int `json:"place_id,omitempty"` + // Title + CategoryID int `json:"category_id,omitempty"` // type=question Question string `json:"question,omitempty"` @@ -267,8 +269,14 @@ type StoriesClickableSticker struct { // nolint: maligned Hashtag string `json:"hashtag,omitempty"` // type=link - LinkObject BaseLink `json:"link_object,omitempty"` - TooltipText string `json:"tooltip_text,omitempty"` + LinkObject BaseLink `json:"link_object,omitempty"` + TooltipText string `json:"tooltip_text,omitempty"` + TooltipTextKey string `json:"tooltip_text_key,omitempty"` + + // type=time + TimestampMs int64 `json:"timestamp_ms,omitempty"` + Date string `json:"date,omitempty"` + Title string `json:"title,omitempty"` // type=market_item Subtype string `json:"subtype,omitempty"` @@ -290,10 +298,19 @@ type StoriesClickableSticker struct { // nolint: maligned AudioStartTime int `json:"audio_start_time,omitempty"` // type=app - App AppsApp `json:"app"` - AppContext string `json:"app_context"` - HasNewInteractions BaseBoolInt `json:"has_new_interactions"` - IsBroadcastNotifyAllowed BaseBoolInt `json:"is_broadcast_notify_allowed"` + App AppsApp `json:"app,omitempty"` + AppContext string `json:"app_context,omitempty"` + HasNewInteractions BaseBoolInt `json:"has_new_interactions,omitempty"` + IsBroadcastNotifyAllowed BaseBoolInt `json:"is_broadcast_notify_allowed,omitempty"` + + // type=emoji + Emoji string `json:"emoji,omitempty"` + + // type=text + Text string `json:"text,omitempty"` + BackgroundStyle string `json:"background_style,omitempty"` + Alignment string `json:"alignment,omitempty"` + SelectionColor string `json:"selection_color,omitempty"` } // TODO: сделать несколько структур для кликабельного стикера @@ -313,6 +330,10 @@ const ( ClickableStickerPoll = "poll" ClickableStickerMusic = "music" ClickableStickerApp = "app" + ClickableStickerTime = "time" + ClickableStickerEmoji = "emoji" + ClickableStickerGeo = "geo" + ClickableStickerText = "text" ) // Subtype of clickable sticker. 
diff --git a/vendor/github.com/SevereCloud/vksdk/v2/object/video.go b/vendor/github.com/SevereCloud/vksdk/v2/object/video.go index 0816af24..5a7e9e8d 100644 --- a/vendor/github.com/SevereCloud/vksdk/v2/object/video.go +++ b/vendor/github.com/SevereCloud/vksdk/v2/object/video.go @@ -213,6 +213,7 @@ type VideoVideoFull struct { Description string `json:"description"` // Video description Duration int `json:"duration"` // Video duration in seconds Files VideoVideoFiles `json:"files"` + Trailer VideoVideoFiles `json:"trailer"` ID int `json:"id"` // Video ID Likes BaseLikes `json:"likes"` Live int `json:"live"` // Returns if the video is live translation diff --git a/vendor/github.com/d5/tengo/v2/README.md b/vendor/github.com/d5/tengo/v2/README.md index fd214cd9..c19c5699 100644 --- a/vendor/github.com/d5/tengo/v2/README.md +++ b/vendor/github.com/d5/tengo/v2/README.md @@ -1,7 +1,3 @@ -<p align="center"> - <img src="https://raw.githubusercontent.com/d5/tengolang-share/master/logo_400.png" width="200" height="200"> -</p> - # The Tengo Language [![GoDoc](https://godoc.org/github.com/d5/tengo/v2?status.svg)](https://godoc.org/github.com/d5/tengo/v2) diff --git a/vendor/github.com/d5/tengo/v2/compiler.go b/vendor/github.com/d5/tengo/v2/compiler.go index 53cc7d38..e4e04303 100644 --- a/vendor/github.com/d5/tengo/v2/compiler.go +++ b/vendor/github.com/d5/tengo/v2/compiler.go @@ -1,9 +1,11 @@ package tengo import ( + "errors" "fmt" "io" "io/ioutil" + "os" "path/filepath" "reflect" "strings" @@ -45,11 +47,12 @@ type Compiler struct { parent *Compiler modulePath string importDir string + importFileExt []string constants []Object symbolTable *SymbolTable scopes []compilationScope scopeIndex int - modules *ModuleMap + modules ModuleGetter compiledModules map[string]*CompiledFunction allowFileImport bool loops []*loop @@ -63,7 +66,7 @@ func NewCompiler( file *parser.SourceFile, symbolTable *SymbolTable, constants []Object, - modules *ModuleMap, + modules ModuleGetter, trace io.Writer, ) *Compiler { mainScope := compilationScope{ @@ -96,6 +99,7 @@ func NewCompiler( trace: trace, modules: modules, compiledModules: make(map[string]*CompiledFunction), + importFileExt: []string{SourceFileExtDefault}, } } @@ -538,12 +542,8 @@ func (c *Compiler) Compile(node parser.Node) error { } } else if c.allowFileImport { moduleName := node.ModuleName - if !strings.HasSuffix(moduleName, ".tengo") { - moduleName += ".tengo" - } - modulePath, err := filepath.Abs( - filepath.Join(c.importDir, moduleName)) + modulePath, err := c.getPathModule(moduleName) if err != nil { return c.errorf(node, "module file path error: %s", err.Error()) @@ -640,6 +640,39 @@ func (c *Compiler) SetImportDir(dir string) { c.importDir = dir } +// SetImportFileExt sets the extension name of the source file for loading +// local module files. +// +// Use this method if you want other source file extension than ".tengo". +// +// // this will search for *.tengo, *.foo, *.bar +// err := c.SetImportFileExt(".tengo", ".foo", ".bar") +// +// This function requires at least one argument, since it will replace the +// current list of extension name. 
+func (c *Compiler) SetImportFileExt(exts ...string) error { + if len(exts) == 0 { + return fmt.Errorf("missing arg: at least one argument is required") + } + + for _, ext := range exts { + if ext != filepath.Ext(ext) || ext == "" { + return fmt.Errorf("invalid file extension: %s", ext) + } + } + + c.importFileExt = exts // Replace the hole current extension list + + return nil +} + +// GetImportFileExt returns the current list of extension name. +// Thease are the complementary suffix of the source file to search and load +// local module files. +func (c *Compiler) GetImportFileExt() []string { + return c.importFileExt +} + func (c *Compiler) compileAssign( node parser.Node, lhs, rhs []parser.Expr, @@ -1098,6 +1131,7 @@ func (c *Compiler) fork( child.parent = c // parent to set to current compiler child.allowFileImport = c.allowFileImport child.importDir = c.importDir + child.importFileExt = c.importFileExt if isFile && c.importDir != "" { child.importDir = filepath.Dir(modulePath) } @@ -1287,6 +1321,28 @@ func (c *Compiler) printTrace(a ...interface{}) { _, _ = fmt.Fprintln(c.trace, a...) } +func (c *Compiler) getPathModule(moduleName string) (pathFile string, err error) { + for _, ext := range c.importFileExt { + nameFile := moduleName + + if !strings.HasSuffix(nameFile, ext) { + nameFile += ext + } + + pathFile, err = filepath.Abs(filepath.Join(c.importDir, nameFile)) + if err != nil { + continue + } + + // Check if file exists + if _, err := os.Stat(pathFile); !errors.Is(err, os.ErrNotExist) { + return pathFile, nil + } + } + + return "", fmt.Errorf("module '%s' not found at: %s", moduleName, pathFile) +} + func resolveAssignLHS( expr parser.Expr, ) (name string, selectors []parser.Expr) { diff --git a/vendor/github.com/d5/tengo/v2/modules.go b/vendor/github.com/d5/tengo/v2/modules.go index c8fcde7f..dadd5a3b 100644 --- a/vendor/github.com/d5/tengo/v2/modules.go +++ b/vendor/github.com/d5/tengo/v2/modules.go @@ -6,6 +6,11 @@ type Importable interface { Import(moduleName string) (interface{}, error) } +// ModuleGetter enables implementing dynamic module loading. +type ModuleGetter interface { + Get(name string) Importable +} + // ModuleMap represents a set of named modules. Use NewModuleMap to create a // new module map. type ModuleMap struct { diff --git a/vendor/github.com/d5/tengo/v2/script.go b/vendor/github.com/d5/tengo/v2/script.go index 46e48029..82b02f52 100644 --- a/vendor/github.com/d5/tengo/v2/script.go +++ b/vendor/github.com/d5/tengo/v2/script.go @@ -12,7 +12,7 @@ import ( // Script can simplify compilation and execution of embedded scripts. type Script struct { variables map[string]*Variable - modules *ModuleMap + modules ModuleGetter input []byte maxAllocs int64 maxConstObjects int @@ -54,7 +54,7 @@ func (s *Script) Remove(name string) bool { } // SetImports sets import modules. 
-func (s *Script) SetImports(modules *ModuleMap) { +func (s *Script) SetImports(modules ModuleGetter) { s.modules = modules } @@ -219,6 +219,18 @@ func (c *Compiled) RunContext(ctx context.Context) (err error) { v := NewVM(c.bytecode, c.globals, c.maxAllocs) ch := make(chan error, 1) go func() { + defer func() { + if r := recover(); r != nil { + switch e := r.(type) { + case string: + ch <- fmt.Errorf(e) + case error: + ch <- e + default: + ch <- fmt.Errorf("unknown panic: %v", e) + } + } + }() ch <- v.Run() }() diff --git a/vendor/github.com/d5/tengo/v2/tengo.go b/vendor/github.com/d5/tengo/v2/tengo.go index 098a1970..490e9aed 100644 --- a/vendor/github.com/d5/tengo/v2/tengo.go +++ b/vendor/github.com/d5/tengo/v2/tengo.go @@ -26,6 +26,9 @@ const ( // MaxFrames is the maximum number of function frames for a VM. MaxFrames = 1024 + + // SourceFileExtDefault is the default extension for source files. + SourceFileExtDefault = ".tengo" ) // CallableFunc is a function signature for the callable functions. diff --git a/vendor/github.com/d5/tengo/v2/vm.go b/vendor/github.com/d5/tengo/v2/vm.go index 811ecef9..c8365252 100644 --- a/vendor/github.com/d5/tengo/v2/vm.go +++ b/vendor/github.com/d5/tengo/v2/vm.go @@ -293,7 +293,7 @@ func (v *VM) run() { case parser.OpMap: v.ip += 2 numElements := int(v.curInsts[v.ip]) | int(v.curInsts[v.ip-1])<<8 - kv := make(map[string]Object) + kv := make(map[string]Object, numElements) for i := v.sp - numElements; i < v.sp; i += 2 { key := v.stack[i] value := v.stack[i+1] diff --git a/vendor/github.com/gomarkdown/markdown/parser/block.go b/vendor/github.com/gomarkdown/markdown/parser/block.go index 7d7e9f9c..32194d9f 100644 --- a/vendor/github.com/gomarkdown/markdown/parser/block.go +++ b/vendor/github.com/gomarkdown/markdown/parser/block.go @@ -17,6 +17,12 @@ const ( escapable = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]" ) +const ( + captionTable = "Table: " + captionFigure = "Figure: " + captionQuote = "Quote: " +) + var ( reBackslashOrAmp = regexp.MustCompile("[\\&]") reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity) @@ -125,6 +131,16 @@ func (p *Parser) block(data []byte) { } if consumed > 0 { included := f(p.includeStack.Last(), path, address) + + // if we find a caption below this, we need to include it in 'included', so + // that the caption will be part of the include text. (+1 to skip newline) + for _, caption := range []string{captionFigure, captionTable, captionQuote} { + if _, _, capcon := p.caption(data[consumed+1:], []byte(caption)); capcon > 0 { + included = append(included, data[consumed+1:consumed+1+capcon]...) + consumed += 1 + capcon + break // there can only be 1 caption. + } + } p.includeStack.Push(path) p.block(included) p.includeStack.Pop() @@ -295,7 +311,7 @@ func (p *Parser) block(data []byte) { // // also works with + or - if p.uliPrefix(data) > 0 { - data = data[p.list(data, 0, 0):] + data = data[p.list(data, 0, 0, '.'):] continue } @@ -305,14 +321,18 @@ func (p *Parser) block(data []byte) { // 2. 
Item 2 if i := p.oliPrefix(data); i > 0 { start := 0 - if i > 2 && p.extensions&OrderedListStart != 0 { - s := string(data[:i-2]) - start, _ = strconv.Atoi(s) - if start == 1 { - start = 0 + delim := byte('.') + if i > 2 { + if p.extensions&OrderedListStart != 0 { + s := string(data[:i-2]) + start, _ = strconv.Atoi(s) + if start == 1 { + start = 0 + } } + delim = data[i-2] } - data = data[p.list(data, ast.ListTypeOrdered, start):] + data = data[p.list(data, ast.ListTypeOrdered, start, delim):] continue } @@ -326,7 +346,7 @@ func (p *Parser) block(data []byte) { // : Definition c if p.extensions&DefinitionLists != 0 { if p.dliPrefix(data) > 0 { - data = data[p.list(data, ast.ListTypeDefinition, 0):] + data = data[p.list(data, ast.ListTypeDefinition, 0, '.'):] continue } } @@ -950,7 +970,7 @@ func (p *Parser) fencedCodeBlock(data []byte, doRender bool) int { } // Check for caption and if found make it a figure. - if captionContent, id, consumed := p.caption(data[beg:], []byte("Figure: ")); consumed > 0 { + if captionContent, id, consumed := p.caption(data[beg:], []byte(captionFigure)); consumed > 0 { figure := &ast.CaptionFigure{} caption := &ast.Caption{} figure.HeadingID = id @@ -1070,7 +1090,7 @@ func (p *Parser) quote(data []byte) int { return end } - if captionContent, id, consumed := p.caption(data[end:], []byte("Quote: ")); consumed > 0 { + if captionContent, id, consumed := p.caption(data[end:], []byte(captionQuote)); consumed > 0 { figure := &ast.CaptionFigure{} caption := &ast.Caption{} figure.HeadingID = id @@ -1190,7 +1210,7 @@ func (p *Parser) oliPrefix(data []byte) int { } // we need >= 1 digits followed by a dot and a space or a tab - if data[i] != '.' || !(data[i+1] == ' ' || data[i+1] == '\t') { + if data[i] != '.' && data[i] != ')' || !(data[i+1] == ' ' || data[i+1] == '\t') { return 0 } return i + 2 @@ -1210,13 +1230,14 @@ func (p *Parser) dliPrefix(data []byte) int { } // parse ordered or unordered list block -func (p *Parser) list(data []byte, flags ast.ListType, start int) int { +func (p *Parser) list(data []byte, flags ast.ListType, start int, delim byte) int { i := 0 flags |= ast.ListItemBeginningOfList list := &ast.List{ ListFlags: flags, Tight: true, Start: start, + Delimiter: delim, } block := p.addBlock(list) @@ -1305,10 +1326,16 @@ func (p *Parser) listItem(data []byte, flags *ast.ListType) int { } } - var bulletChar byte = '*' + var ( + bulletChar byte = '*' + delimiter byte = '.' + ) i := p.uliPrefix(data) if i == 0 { i = p.oliPrefix(data) + if i > 0 { + delimiter = data[i-2] + } } else { bulletChar = data[i-2] } @@ -1468,7 +1495,7 @@ gatherlines: ListFlags: *flags, Tight: false, BulletChar: bulletChar, - Delimiter: '.', // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark + Delimiter: delimiter, } p.addBlock(listItem) @@ -1574,7 +1601,7 @@ func (p *Parser) paragraph(data []byte) int { // did this blank line followed by a definition list item? 
if p.extensions&DefinitionLists != 0 { if i < len(data)-1 && data[i+1] == ':' { - listLen := p.list(data[prev:], ast.ListTypeDefinition, 0) + listLen := p.list(data[prev:], ast.ListTypeDefinition, 0, '.') return prev + listLen } } @@ -1645,10 +1672,18 @@ func (p *Parser) paragraph(data []byte) int { } } + // if there's a table, paragraph is over + if p.extensions&Tables != 0 { + if j, _, _ := p.tableHeader(current, false); j > 0 { + p.renderParagraph(data[:i]) + return i + } + } + // if there's a definition list item, prev line is a definition term if p.extensions&DefinitionLists != 0 { if p.dliPrefix(current) != 0 { - ret := p.list(data[prev:], ast.ListTypeDefinition, 0) + ret := p.list(data[prev:], ast.ListTypeDefinition, 0, '.') return ret + prev } } diff --git a/vendor/github.com/gomarkdown/markdown/parser/block_table.go b/vendor/github.com/gomarkdown/markdown/parser/block_table.go index f6c06dff..53fbd471 100644 --- a/vendor/github.com/gomarkdown/markdown/parser/block_table.go +++ b/vendor/github.com/gomarkdown/markdown/parser/block_table.go @@ -105,7 +105,7 @@ func (p *Parser) tableFooter(data []byte) bool { } // tableHeaders parses the header. If recognized it will also add a table. -func (p *Parser) tableHeader(data []byte) (size int, columns []ast.CellAlignFlags, table ast.Node) { +func (p *Parser) tableHeader(data []byte, doRender bool) (size int, columns []ast.CellAlignFlags, table ast.Node) { i := 0 colCount := 1 headerIsUnderline := true @@ -236,11 +236,13 @@ func (p *Parser) tableHeader(data []byte) (size int, columns []ast.CellAlignFlag return } - table = &ast.Table{} - p.addBlock(table) - if header != nil { - p.addBlock(&ast.TableHeader{}) - p.tableRow(header, columns, true) + if doRender { + table = &ast.Table{} + p.addBlock(table) + if header != nil { + p.addBlock(&ast.TableHeader{}) + p.tableRow(header, columns, true) + } } size = skipCharN(data, i, '\n', 1) return @@ -255,7 +257,7 @@ Bob | 31 | 555-1234 Alice | 27 | 555-4321 */ func (p *Parser) table(data []byte) int { - i, columns, table := p.tableHeader(data) + i, columns, table := p.tableHeader(data, true) if i == 0 { return 0 } @@ -284,7 +286,7 @@ func (p *Parser) table(data []byte) int { p.tableRow(data[rowStart:i], columns, false) } - if captionContent, id, consumed := p.caption(data[i:], []byte("Table: ")); consumed > 0 { + if captionContent, id, consumed := p.caption(data[i:], []byte(captionTable)); consumed > 0 { caption := &ast.Caption{} p.Inline(caption, captionContent) diff --git a/vendor/github.com/gomarkdown/markdown/parser/inline.go b/vendor/github.com/gomarkdown/markdown/parser/inline.go index bc30326d..d68983f7 100644 --- a/vendor/github.com/gomarkdown/markdown/parser/inline.go +++ b/vendor/github.com/gomarkdown/markdown/parser/inline.go @@ -766,7 +766,22 @@ func entity(p *Parser, data []byte, offset int) (int, ast.Node) { // undo & escaping or it will be converted to &amp; by another // escaper in the renderer if bytes.Equal(ent, []byte("&")) { - ent = []byte{'&'} + return end, newTextNode([]byte{'&'}) + } + if len(ent) < 4 { + return end, newTextNode(ent) + } + + // if ent consists solely out of numbers (hex or decimal) convert that unicode codepoint to actual rune + codepoint := uint64(0) + var err error + if ent[2] == 'x' || ent[2] == 'X' { // hexadecimal + codepoint, err = strconv.ParseUint(string(ent[3:len(ent)-1]), 16, 64) + } else { + codepoint, err = strconv.ParseUint(string(ent[2:len(ent)-1]), 10, 64) + } + if err == nil { // only if conversion was valid return here. 
+ return end, newTextNode([]byte(string(codepoint))) } return end, newTextNode(ent) diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go new file mode 100644 index 00000000..d7fcbf28 --- /dev/null +++ b/vendor/github.com/google/uuid/null.go @@ -0,0 +1,118 @@ +// Copyright 2021 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +var jsonNull = []byte("null") + +// NullUUID represents a UUID that may be null. +// NullUUID implements the SQL driver.Scanner interface so +// it can be used as a scan destination: +// +// var u uuid.NullUUID +// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) +// ... +// if u.Valid { +// // use u.UUID +// } else { +// // NULL value +// } +// +type NullUUID struct { + UUID UUID + Valid bool // Valid is true if UUID is not NULL +} + +// Scan implements the SQL driver.Scanner interface. +func (nu *NullUUID) Scan(value interface{}) error { + if value == nil { + nu.UUID, nu.Valid = Nil, false + return nil + } + + err := nu.UUID.Scan(value) + if err != nil { + nu.Valid = false + return err + } + + nu.Valid = true + return nil +} + +// Value implements the driver Valuer interface. +func (nu NullUUID) Value() (driver.Value, error) { + if !nu.Valid { + return nil, nil + } + // Delegate to UUID Value function + return nu.UUID.Value() +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (nu NullUUID) MarshalBinary() ([]byte, error) { + if nu.Valid { + return nu.UUID[:], nil + } + + return []byte(nil), nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (nu *NullUUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(nu.UUID[:], data) + nu.Valid = true + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (nu NullUUID) MarshalText() ([]byte, error) { + if nu.Valid { + return nu.UUID.MarshalText() + } + + return jsonNull, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (nu *NullUUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + nu.Valid = false + return err + } + nu.UUID = id + nu.Valid = true + return nil +} + +// MarshalJSON implements json.Marshaler. +func (nu NullUUID) MarshalJSON() ([]byte, error) { + if nu.Valid { + return json.Marshal(nu.UUID) + } + + return jsonNull, nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (nu *NullUUID) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, jsonNull) { + *nu = NullUUID{} + return nil // valid null UUID + } + err := json.Unmarshal(data, &nu.UUID) + nu.Valid = err == nil + return err +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index 60d26bb5..a57207ae 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -12,6 +12,7 @@ import ( "fmt" "io" "strings" + "sync" ) // A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC @@ -33,7 +34,15 @@ const ( Future // Reserved for future definition. 
) -var rander = rand.Reader // random function +const randPoolSize = 16 * 16 + +var ( + rander = rand.Reader // random function + poolEnabled = false + poolMu sync.Mutex + poolPos = randPoolSize // protected with poolMu + pool [randPoolSize]byte // protected with poolMu +) type invalidLengthError struct{ len int } @@ -41,6 +50,12 @@ func (err invalidLengthError) Error() string { return fmt.Sprintf("invalid UUID length: %d", err.len) } +// IsInvalidLengthError is matcher function for custom error invalidLengthError +func IsInvalidLengthError(err error) bool { + _, ok := err.(invalidLengthError) + return ok +} + // Parse decodes s into a UUID or returns an error. Both the standard UUID // forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the @@ -249,3 +264,31 @@ func SetRand(r io.Reader) { } rander = r } + +// EnableRandPool enables internal randomness pool used for Random +// (Version 4) UUID generation. The pool contains random bytes read from +// the random number generator on demand in batches. Enabling the pool +// may improve the UUID generation throughput significantly. +// +// Since the pool is stored on the Go heap, this feature may be a bad fit +// for security sensitive applications. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func EnableRandPool() { + poolEnabled = true +} + +// DisableRandPool disables the randomness pool if it was previously +// enabled with EnableRandPool. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func DisableRandPool() { + poolEnabled = false + defer poolMu.Unlock() + poolMu.Lock() + poolPos = randPoolSize +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go index 86160fbd..7697802e 100644 --- a/vendor/github.com/google/uuid/version4.go +++ b/vendor/github.com/google/uuid/version4.go @@ -27,6 +27,8 @@ func NewString() string { // The strength of the UUIDs is based on the strength of the crypto/rand // package. // +// Uses the randomness pool if it was enabled with EnableRandPool. +// // A note about uniqueness derived from the UUID Wikipedia entry: // // Randomly generated UUIDs have 122 random bits. One's annual risk of being @@ -35,7 +37,10 @@ func NewString() string { // equivalent to the odds of creating a few tens of trillions of UUIDs in a // year and having one duplicate. func NewRandom() (UUID, error) { - return NewRandomFromReader(rander) + if !poolEnabled { + return NewRandomFromReader(rander) + } + return newRandomFromPool() } // NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. 
@@ -49,3 +54,23 @@ func NewRandomFromReader(r io.Reader) (UUID, error) { uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 return uuid, nil } + +func newRandomFromPool() (UUID, error) { + var uuid UUID + poolMu.Lock() + if poolPos == randPoolSize { + _, err := io.ReadFull(rander, pool[:]) + if err != nil { + poolMu.Unlock() + return Nil, err + } + poolPos = 0 + } + copy(uuid[:], pool[poolPos:(poolPos+16)]) + poolPos += 16 + poolMu.Unlock() + + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md index 52b111d5..c589addf 100644 --- a/vendor/github.com/json-iterator/go/README.md +++ b/vendor/github.com/json-iterator/go/README.md @@ -8,8 +8,6 @@ A high-performance 100% compatible drop-in replacement of "encoding/json" -You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go) - # Benchmark ![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) diff --git a/vendor/github.com/keybase/go-keybase-chat-bot/kbchat/kbchat.go b/vendor/github.com/keybase/go-keybase-chat-bot/kbchat/kbchat.go index 68c8ca70..886cfff2 100644 --- a/vendor/github.com/keybase/go-keybase-chat-bot/kbchat/kbchat.go +++ b/vendor/github.com/keybase/go-keybase-chat-bot/kbchat/kbchat.go @@ -9,6 +9,7 @@ import ( "io/ioutil" "os" "os/exec" + "runtime" "sync" "time" @@ -207,7 +208,9 @@ func (a *API) getUsername(runOpts RunOptions) (username string, err error) { if err != nil { return "", err } - p.ExtraFiles = []*os.File{output.(*os.File)} + if runtime.GOOS != "windows" { + p.ExtraFiles = []*os.File{output.(*os.File)} + } if err = p.Start(); err != nil { return "", err } @@ -282,7 +285,7 @@ func (a *API) startPipes() (err error) { defer a.Unlock() if a.apiCmd != nil { if err := a.apiCmd.Process.Kill(); err != nil { - return err + return fmt.Errorf("unable to kill previous API command %v", err) } } a.apiCmd = nil @@ -290,30 +293,32 @@ func (a *API) startPipes() (err error) { if a.runOpts.StartService { args := []string{fmt.Sprintf("-enable-bot-lite-mode=%v", a.runOpts.DisableBotLiteMode), "service"} if err := a.runOpts.Command(args...).Start(); err != nil { - return err + return fmt.Errorf("unable to start service %v", err) } } if a.username, err = a.auth(); err != nil { - return err + return fmt.Errorf("unable to auth: %v", err) } cmd := a.runOpts.Command("chat", "notification-settings", fmt.Sprintf("-disable-typing=%v", !a.runOpts.EnableTyping)) if err = cmd.Run(); err != nil { - return err + return fmt.Errorf("unable to set notifiation settings %v", err) } a.apiCmd = a.runOpts.Command("chat", "api") if a.apiInput, err = a.apiCmd.StdinPipe(); err != nil { - return err + return fmt.Errorf("unable to get api stdin: %v", err) } output, err := a.apiCmd.StdoutPipe() if err != nil { - return err + return fmt.Errorf("unabel to get api stdout: %v", err) + } + if runtime.GOOS != "windows" { + a.apiCmd.ExtraFiles = []*os.File{output.(*os.File)} } - a.apiCmd.ExtraFiles = []*os.File{output.(*os.File)} if err := a.apiCmd.Start(); err != nil { - return err + return fmt.Errorf("unable to run chat api cmd: %v", err) } a.apiOutput = bufio.NewReader(output) return nil @@ -508,7 +513,9 @@ func (a *API) Listen(opts ListenOptions) (*Subscription, error) { time.Sleep(pause) continue } - p.ExtraFiles = []*os.File{stderr.(*os.File), output.(*os.File)} + if runtime.GOOS != "windows" { + p.ExtraFiles = []*os.File{stderr.(*os.File), 
output.(*os.File)} + } boutput := bufio.NewScanner(output) if err := p.Start(); err != nil { diff --git a/vendor/github.com/klauspost/cpuid/v2/.travis.yml b/vendor/github.com/klauspost/cpuid/v2/.travis.yml deleted file mode 100644 index aa9bad7e..00000000 --- a/vendor/github.com/klauspost/cpuid/v2/.travis.yml +++ /dev/null @@ -1,67 +0,0 @@ -language: go - -os: - - linux - - osx - - windows - -arch: - - amd64 - - arm64 - -go: - - 1.13.x - - 1.14.x - - 1.15.x - - 1.16.x - - master - -env: - - CGO_ENABLED=0 - -script: - - go vet ./... - - go test -test.v -test.run ^TestCPUID$ - - CGO_ENABLED=1 go test -race ./... - - go test -tags=nounsafe -test.v -test.run ^TestCPUID$ - - go test -tags=noasm ./... - - go run ./cmd/cpuid/main.go - - go run ./cmd/cpuid/main.go -json - -matrix: - allow_failures: - - go: 'master' - fast_finish: true - include: - - stage: other - go: 1.16.x - os: linux - arch: amd64 - script: - - diff <(gofmt -d .) <(printf "") - - diff <(gofmt -d ./private) <(printf "") - - curl -sfL https://git.io/goreleaser | VERSION=v0.157.0 sh -s -- check # check goreleaser config for deprecations - - curl -sL https://git.io/goreleaser | VERSION=v0.157.0 sh -s -- --snapshot --skip-publish --rm-dist - - go get github.com/klauspost/asmfmt&&go install github.com/klauspost/asmfmt/cmd/asmfmt - - diff <(asmfmt -d .) <(printf "") - - GOOS=linux GOARCH=386 go test . - - ./test-architectures.sh - - stage: other - go: 1.15.x - os: linux - arch: amd64 - script: - - ./test-architectures.sh - -deploy: - - provider: script - skip_cleanup: true - script: curl -sL https://git.io/goreleaser | VERSION=v0.157.0 bash || true - on: - tags: true - condition: ($TRAVIS_OS_NAME = linux) && ($TRAVIS_CPU_ARCH = amd64) - go: 1.16.x -branches: - only: - - master - - /^v\d+\.\d+(\.\d+)?(-\S*)?$/ diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go index 43e9cc17..1d88736b 100644 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go @@ -83,6 +83,7 @@ const ( AVX512DQ // AVX-512 Doubleword and Quadword Instructions AVX512ER // AVX-512 Exponential and Reciprocal Instructions AVX512F // AVX-512 Foundation + AVX512FP16 // AVX-512 FP16 Instructions AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions AVX512PF // AVX-512 Prefetch Instructions AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions @@ -96,7 +97,9 @@ const ( BMI2 // Bit Manipulation Instruction Set 2 CLDEMOTE // Cache Line Demote CLMUL // Carry-less Multiplication + CLZERO // CLZERO instruction supported CMOV // i686 CMOV + CPBOOST // Core Performance Boost CX16 // CMPXCHG16B Instruction ENQCMD // Enqueue Command ERMS // Enhanced REP MOVSB/STOSB @@ -106,6 +109,7 @@ const ( GFNI // Galois Field New Instructions HLE // Hardware Lock Elision HTT // Hyperthreading (enabled) + HWA // Hardware assert supported. Indicates support for MSRC001_10 HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) IBS // Instruction Based Sampling (AMD) @@ -117,18 +121,25 @@ const ( IBSOPSAM // Instruction Based Sampling Feature (AMD) IBSRDWROPCNT // Instruction Based Sampling Feature (AMD) IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD) + INT_WBINVD // WBINVD/WBNOINVD are interruptible. + INVLPGB // NVLPGB and TLBSYNC instruction supported LZCNT // LZCNT instruction + MCAOVERFLOW // MCA overflow recovery support. 
+ MCOMMIT // MCOMMIT instruction supported MMX // standard MMX MMXEXT // SSE integer functions or AMD MMX ext MOVDIR64B // Move 64 Bytes as Direct Store MOVDIRI // Move Doubleword as Direct Store MPX // Intel MPX (Memory Protection Extensions) + MSRIRC // Instruction Retired Counter MSR available NX // NX (No-Execute) bit POPCNT // POPCNT instruction + RDPRU // RDPRU instruction supported RDRAND // RDRAND instruction is available RDSEED // RDSEED instruction is available RDTSCP // RDTSCP Instruction RTM // Restricted Transactional Memory + RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort. SERIALIZE // Serialize Instruction Execution SGX // Software Guard Extensions SGXLC // Software Guard Extensions Launch Control @@ -141,6 +152,7 @@ const ( SSE4A // AMD Barcelona microarchitecture SSE4a instructions SSSE3 // Conroe SSSE3 functions STIBP // Single Thread Indirect Branch Predictors + SUCCOR // Software uncorrectable error containment and recovery capability. TBM // AMD Trailing Bit Manipulation TSXLDTRK // Intel TSX Suspend Load Address Tracking VAES // Vector AES @@ -194,7 +206,8 @@ type CPUInfo struct { Family int // CPU family number Model int // CPU model number CacheLine int // Cache line size in bytes. Will be 0 if undetectable. - Hz int64 // Clock speed, if known, 0 otherwise + Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed. + BoostFreq int64 // Max clock speed, if known, 0 otherwise Cache struct { L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected @@ -363,25 +376,42 @@ func (c CPUInfo) LogicalCPU() int { return int(ebx >> 24) } -// hertz tries to compute the clock speed of the CPU. If leaf 15 is +// frequencies tries to compute the clock speed of the CPU. If leaf 15 is // supported, use it, otherwise parse the brand string. Yes, really. -func hertz(model string) int64 { +func (c *CPUInfo) frequencies() { + c.Hz, c.BoostFreq = 0, 0 mfi := maxFunctionID() if mfi >= 0x15 { eax, ebx, ecx, _ := cpuid(0x15) if eax != 0 && ebx != 0 && ecx != 0 { - return int64((int64(ecx) * int64(ebx)) / int64(eax)) + c.Hz = (int64(ecx) * int64(ebx)) / int64(eax) } } + if mfi >= 0x16 { + a, b, _, _ := cpuid(0x16) + // Base... + if a&0xffff > 0 { + c.Hz = int64(a&0xffff) * 1_000_000 + } + // Boost... + if b&0xffff > 0 { + c.BoostFreq = int64(b&0xffff) * 1_000_000 + } + } + if c.Hz > 0 { + return + } + // computeHz determines the official rated speed of a CPU from its brand // string. This insanity is *actually the official documented way to do // this according to Intel*, prior to leaf 0x15 existing. The official // documentation only shows this working for exactly `x.xx` or `xxxx` // cases, e.g., `2.50GHz` or `1300MHz`; this parser will accept other // sizes. + model := c.BrandName hz := strings.LastIndex(model, "Hz") if hz < 3 { - return 0 + return } var multiplier int64 switch model[hz-1] { @@ -393,7 +423,7 @@ func hertz(model string) int64 { multiplier = 1000 * 1000 * 1000 * 1000 } if multiplier == 0 { - return 0 + return } freq := int64(0) divisor := int64(0) @@ -405,21 +435,22 @@ func hertz(model string) int64 { decimalShift *= 10 } else if model[i] == '.' 
{ if divisor != 0 { - return 0 + return } divisor = decimalShift } else { - return 0 + return } } // we didn't find a space if i < 0 { - return 0 + return } if divisor != 0 { - return (freq * multiplier) / divisor + c.Hz = (freq * multiplier) / divisor + return } - return freq * multiplier + c.Hz = freq * multiplier } // VM Will return true if the cpu id indicates we are in @@ -911,6 +942,7 @@ func support() flagSet { fs.setIf(ecx&(1<<29) != 0, ENQCMD) fs.setIf(ecx&(1<<30) != 0, SGXLC) // CPUID.(EAX=7, ECX=0).EDX + fs.setIf(edx&(1<<11) != 0, RTM_ALWAYS_ABORT) fs.setIf(edx&(1<<14) != 0, SERIALIZE) fs.setIf(edx&(1<<16) != 0, TSXLDTRK) fs.setIf(edx&(1<<26) != 0, IBPB) @@ -949,6 +981,7 @@ func support() flagSet { // edx fs.setIf(edx&(1<<8) != 0, AVX512VP2INTERSECT) fs.setIf(edx&(1<<22) != 0, AMXBF16) + fs.setIf(edx&(1<<23) != 0, AVX512FP16) fs.setIf(edx&(1<<24) != 0, AMXTILE) fs.setIf(edx&(1<<25) != 0, AMXINT8) // eax1 = CPUID.(EAX=7, ECX=1).EAX @@ -980,9 +1013,23 @@ func support() flagSet { } } + if maxExtendedFunction() >= 0x80000007 { + _, b, _, d := cpuid(0x80000007) + fs.setIf((b&(1<<0)) != 0, MCAOVERFLOW) + fs.setIf((b&(1<<1)) != 0, SUCCOR) + fs.setIf((b&(1<<2)) != 0, HWA) + fs.setIf((d&(1<<9)) != 0, CPBOOST) + } + if maxExtendedFunction() >= 0x80000008 { _, b, _, _ := cpuid(0x80000008) fs.setIf((b&(1<<9)) != 0, WBNOINVD) + fs.setIf((b&(1<<8)) != 0, MCOMMIT) + fs.setIf((b&(1<<13)) != 0, INT_WBINVD) + fs.setIf((b&(1<<4)) != 0, RDPRU) + fs.setIf((b&(1<<3)) != 0, INVLPGB) + fs.setIf((b&(1<<1)) != 0, MSRIRC) + fs.setIf((b&(1<<0)) != 0, CLZERO) } if maxExtendedFunction() >= 0x8000001b && fs.inSet(IBS) { diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go index 93bc20f4..367c35c8 100644 --- a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go +++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go @@ -30,6 +30,6 @@ func addInfo(c *CPUInfo, safe bool) { c.LogicalCores = logicalCores() c.PhysicalCores = physicalCores() c.VendorID, c.VendorString = vendorID() - c.Hz = hertz(c.BrandName) c.cacheSize() + c.frequencies() } diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go index 0e764f90..b1fe42e4 100644 --- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -24,103 +24,115 @@ func _() { _ = x[AVX512DQ-14] _ = x[AVX512ER-15] _ = x[AVX512F-16] - _ = x[AVX512IFMA-17] - _ = x[AVX512PF-18] - _ = x[AVX512VBMI-19] - _ = x[AVX512VBMI2-20] - _ = x[AVX512VL-21] - _ = x[AVX512VNNI-22] - _ = x[AVX512VP2INTERSECT-23] - _ = x[AVX512VPOPCNTDQ-24] - _ = x[AVXSLOW-25] - _ = x[BMI1-26] - _ = x[BMI2-27] - _ = x[CLDEMOTE-28] - _ = x[CLMUL-29] - _ = x[CMOV-30] - _ = x[CX16-31] - _ = x[ENQCMD-32] - _ = x[ERMS-33] - _ = x[F16C-34] - _ = x[FMA3-35] - _ = x[FMA4-36] - _ = x[GFNI-37] - _ = x[HLE-38] - _ = x[HTT-39] - _ = x[HYPERVISOR-40] - _ = x[IBPB-41] - _ = x[IBS-42] - _ = x[IBSBRNTRGT-43] - _ = x[IBSFETCHSAM-44] - _ = x[IBSFFV-45] - _ = x[IBSOPCNT-46] - _ = x[IBSOPCNTEXT-47] - _ = x[IBSOPSAM-48] - _ = x[IBSRDWROPCNT-49] - _ = x[IBSRIPINVALIDCHK-50] - _ = x[LZCNT-51] - _ = x[MMX-52] - _ = x[MMXEXT-53] - _ = x[MOVDIR64B-54] - _ = x[MOVDIRI-55] - _ = x[MPX-56] - _ = x[NX-57] - _ = x[POPCNT-58] - _ = x[RDRAND-59] - _ = x[RDSEED-60] - _ = x[RDTSCP-61] - _ = x[RTM-62] - _ = x[SERIALIZE-63] - _ = x[SGX-64] - _ = x[SGXLC-65] - _ = x[SHA-66] - _ = x[SSE-67] - _ = x[SSE2-68] - _ = x[SSE3-69] - _ = x[SSE4-70] - _ = 
x[SSE42-71] - _ = x[SSE4A-72] - _ = x[SSSE3-73] - _ = x[STIBP-74] - _ = x[TBM-75] - _ = x[TSXLDTRK-76] - _ = x[VAES-77] - _ = x[VMX-78] - _ = x[VPCLMULQDQ-79] - _ = x[WAITPKG-80] - _ = x[WBNOINVD-81] - _ = x[XOP-82] - _ = x[AESARM-83] - _ = x[ARMCPUID-84] - _ = x[ASIMD-85] - _ = x[ASIMDDP-86] - _ = x[ASIMDHP-87] - _ = x[ASIMDRDM-88] - _ = x[ATOMICS-89] - _ = x[CRC32-90] - _ = x[DCPOP-91] - _ = x[EVTSTRM-92] - _ = x[FCMA-93] - _ = x[FP-94] - _ = x[FPHP-95] - _ = x[GPA-96] - _ = x[JSCVT-97] - _ = x[LRCPC-98] - _ = x[PMULL-99] - _ = x[SHA1-100] - _ = x[SHA2-101] - _ = x[SHA3-102] - _ = x[SHA512-103] - _ = x[SM3-104] - _ = x[SM4-105] - _ = x[SVE-106] - _ = x[lastID-107] + _ = x[AVX512FP16-17] + _ = x[AVX512IFMA-18] + _ = x[AVX512PF-19] + _ = x[AVX512VBMI-20] + _ = x[AVX512VBMI2-21] + _ = x[AVX512VL-22] + _ = x[AVX512VNNI-23] + _ = x[AVX512VP2INTERSECT-24] + _ = x[AVX512VPOPCNTDQ-25] + _ = x[AVXSLOW-26] + _ = x[BMI1-27] + _ = x[BMI2-28] + _ = x[CLDEMOTE-29] + _ = x[CLMUL-30] + _ = x[CLZERO-31] + _ = x[CMOV-32] + _ = x[CPBOOST-33] + _ = x[CX16-34] + _ = x[ENQCMD-35] + _ = x[ERMS-36] + _ = x[F16C-37] + _ = x[FMA3-38] + _ = x[FMA4-39] + _ = x[GFNI-40] + _ = x[HLE-41] + _ = x[HTT-42] + _ = x[HWA-43] + _ = x[HYPERVISOR-44] + _ = x[IBPB-45] + _ = x[IBS-46] + _ = x[IBSBRNTRGT-47] + _ = x[IBSFETCHSAM-48] + _ = x[IBSFFV-49] + _ = x[IBSOPCNT-50] + _ = x[IBSOPCNTEXT-51] + _ = x[IBSOPSAM-52] + _ = x[IBSRDWROPCNT-53] + _ = x[IBSRIPINVALIDCHK-54] + _ = x[INT_WBINVD-55] + _ = x[INVLPGB-56] + _ = x[LZCNT-57] + _ = x[MCAOVERFLOW-58] + _ = x[MCOMMIT-59] + _ = x[MMX-60] + _ = x[MMXEXT-61] + _ = x[MOVDIR64B-62] + _ = x[MOVDIRI-63] + _ = x[MPX-64] + _ = x[MSRIRC-65] + _ = x[NX-66] + _ = x[POPCNT-67] + _ = x[RDPRU-68] + _ = x[RDRAND-69] + _ = x[RDSEED-70] + _ = x[RDTSCP-71] + _ = x[RTM-72] + _ = x[RTM_ALWAYS_ABORT-73] + _ = x[SERIALIZE-74] + _ = x[SGX-75] + _ = x[SGXLC-76] + _ = x[SHA-77] + _ = x[SSE-78] + _ = x[SSE2-79] + _ = x[SSE3-80] + _ = x[SSE4-81] + _ = x[SSE42-82] + _ = x[SSE4A-83] + _ = x[SSSE3-84] + _ = x[STIBP-85] + _ = x[SUCCOR-86] + _ = x[TBM-87] + _ = x[TSXLDTRK-88] + _ = x[VAES-89] + _ = x[VMX-90] + _ = x[VPCLMULQDQ-91] + _ = x[WAITPKG-92] + _ = x[WBNOINVD-93] + _ = x[XOP-94] + _ = x[AESARM-95] + _ = x[ARMCPUID-96] + _ = x[ASIMD-97] + _ = x[ASIMDDP-98] + _ = x[ASIMDHP-99] + _ = x[ASIMDRDM-100] + _ = x[ATOMICS-101] + _ = x[CRC32-102] + _ = x[DCPOP-103] + _ = x[EVTSTRM-104] + _ = x[FCMA-105] + _ = x[FP-106] + _ = x[FPHP-107] + _ = x[GPA-108] + _ = x[JSCVT-109] + _ = x[LRCPC-110] + _ = x[PMULL-111] + _ = x[SHA1-112] + _ = x[SHA2-113] + _ = x[SHA3-114] + _ = x[SHA512-115] + _ = x[SM3-116] + _ = x[SM4-117] + _ = x[SVE-118] + _ = x[lastID-119] _ = x[firstID-0] } -const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWBMI1BMI2CLDEMOTECLMULCMOVCX16ENQCMDERMSF16CFMA3FMA4GFNIHLEHTTHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKLZCNTMMXMMXEXTMOVDIR64BMOVDIRIMPXNXPOPCNTRDRANDRDSEEDRDTSCPRTMSERIALIZESGXSGXLCSHASSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPTBMTSXLDTRKVAESVMXVPCLMULQDQWAITPKGWBNOINVDXOPAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" +const _FeatureID_name = 
"firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWBMI1BMI2CLDEMOTECLMULCLZEROCMOVCPBOOSTCX16ENQCMDERMSF16CFMA3FMA4GFNIHLEHTTHWAHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKINT_WBINVDINVLPGBLZCNTMCAOVERFLOWMCOMMITMMXMMXEXTMOVDIR64BMOVDIRIMPXMSRIRCNXPOPCNTRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSERIALIZESGXSGXLCSHASSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSUCCORTBMTSXLDTRKVAESVMXVPCLMULQDQWAITPKGWBNOINVDXOPAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" -var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 141, 151, 162, 170, 180, 198, 213, 220, 224, 228, 236, 241, 245, 249, 255, 259, 263, 267, 271, 275, 278, 281, 291, 295, 298, 308, 319, 325, 333, 344, 352, 364, 380, 385, 388, 394, 403, 410, 413, 415, 421, 427, 433, 439, 442, 451, 454, 459, 462, 465, 469, 473, 477, 482, 487, 492, 497, 500, 508, 512, 515, 525, 532, 540, 543, 549, 557, 562, 569, 576, 584, 591, 596, 601, 608, 612, 614, 618, 621, 626, 631, 636, 640, 644, 648, 654, 657, 660, 663, 669} +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 143, 151, 161, 172, 180, 190, 208, 223, 230, 234, 238, 246, 251, 257, 261, 268, 272, 278, 282, 286, 290, 294, 298, 301, 304, 307, 317, 321, 324, 334, 345, 351, 359, 370, 378, 390, 406, 416, 423, 428, 439, 446, 449, 455, 464, 471, 474, 480, 482, 488, 493, 499, 505, 511, 514, 530, 539, 542, 547, 550, 553, 557, 561, 565, 570, 575, 580, 585, 591, 594, 602, 606, 609, 619, 626, 634, 637, 643, 651, 656, 663, 670, 678, 685, 690, 695, 702, 706, 708, 712, 715, 720, 725, 730, 734, 738, 742, 748, 751, 754, 757, 763} func (i FeatureID) String() string { if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { diff --git a/vendor/github.com/lrstanley/girc/builtin.go b/vendor/github.com/lrstanley/girc/builtin.go index 778a5c68..081e9465 100644 --- a/vendor/github.com/lrstanley/girc/builtin.go +++ b/vendor/github.com/lrstanley/girc/builtin.go @@ -108,7 +108,10 @@ func nickCollisionHandler(c *Client, e Event) { return } - c.Cmd.Nick(c.Config.HandleNickCollide(c.GetNick())) + newNick := c.Config.HandleNickCollide(c.GetNick()) + if newNick != "" { + c.Cmd.Nick(newNick) + } } // handlePING helps respond to ping requests from the server. diff --git a/vendor/github.com/lrstanley/girc/client.go b/vendor/github.com/lrstanley/girc/client.go index f8035755..3308b399 100644 --- a/vendor/github.com/lrstanley/girc/client.go +++ b/vendor/github.com/lrstanley/girc/client.go @@ -168,6 +168,9 @@ type Config struct { // an invalid nickname. For example, if "test" is already in use, or is // blocked by the network/a service, the client will try and use "test_", // then it will attempt "test__", "test___", and so on. + // + // If HandleNickCollide returns an empty string, the client will not + // attempt to fix nickname collisions, and you must handle this yourself. 
HandleNickCollide func(oldNick string) (newNick string) } diff --git a/vendor/github.com/matterbridge/matterclient/matterclient.go b/vendor/github.com/matterbridge/matterclient/matterclient.go index a32219fe..3a89e141 100644 --- a/vendor/github.com/matterbridge/matterclient/matterclient.go +++ b/vendor/github.com/matterbridge/matterclient/matterclient.go @@ -71,6 +71,7 @@ type Client struct { WsConnected bool OnWsConnect func() reconnectBusy bool + Timeout int logger *logrus.Entry rootLogger *logrus.Logger @@ -80,6 +81,8 @@ type Client struct { lastPong time.Time } +var Matterircd bool + func New(login string, pass string, team string, server string, mfatoken string) *Client { rootLogger := logrus.New() rootLogger.SetFormatter(&prefixed.TextFormatter{ @@ -229,7 +232,12 @@ func (m *Client) initClient(b *backoff.Backoff) error { }, Proxy: http.ProxyFromEnvironment, } - m.Client.HTTPClient.Timeout = time.Second * 10 + + if m.Timeout == 0 { + m.Timeout = 10 + } + + m.Client.HTTPClient.Timeout = time.Second * time.Duration(m.Timeout) // handle MMAUTHTOKEN and personal token if err := m.handleLoginToken(); err != nil { @@ -613,7 +621,9 @@ func (m *Client) WsReceiver(ctx context.Context) { Team: m.Credentials.Team, } - m.parseMessage(msg) + if !Matterircd { + m.parseMessage(msg) + } m.MessageChan <- msg case response := <-m.WsClient.ResponseChannel: diff --git a/vendor/github.com/mattermost/logr/v2/buffer.go b/vendor/github.com/mattermost/logr/v2/buffer.go new file mode 100644 index 00000000..42bf5255 --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/buffer.go @@ -0,0 +1,28 @@ +package logr + +import ( + "bytes" + "sync" +) + +// Buffer provides a thread-safe buffer useful for logging to memory in unit tests. +type Buffer struct { + buf bytes.Buffer + mux sync.Mutex +} + +func (b *Buffer) Read(p []byte) (n int, err error) { + b.mux.Lock() + defer b.mux.Unlock() + return b.buf.Read(p) +} +func (b *Buffer) Write(p []byte) (n int, err error) { + b.mux.Lock() + defer b.mux.Unlock() + return b.buf.Write(p) +} +func (b *Buffer) String() string { + b.mux.Lock() + defer b.mux.Unlock() + return b.buf.String() +} diff --git a/vendor/github.com/mattermost/logr/v2/config/config.go b/vendor/github.com/mattermost/logr/v2/config/config.go index a93b7a25..e01a5514 100644 --- a/vendor/github.com/mattermost/logr/v2/config/config.go +++ b/vendor/github.com/mattermost/logr/v2/config/config.go @@ -31,8 +31,8 @@ type TargetFactory func(targetType string, options json.RawMessage) (logr.Target type FormatterFactory func(format string, options json.RawMessage) (logr.Formatter, error) type Factories struct { - targetFactory TargetFactory // can be nil - formatterFactory FormatterFactory // can be nil + TargetFactory TargetFactory // can be nil + FormatterFactory FormatterFactory // can be nil } var removeAll = func(ti logr.TargetInfo) bool { return true } @@ -56,7 +56,7 @@ func ConfigureTargets(lgr *logr.Logr, config map[string]TargetCfg, factories *Fa } for name, tcfg := range config { - target, err := newTarget(tcfg.Type, tcfg.Options, factories.targetFactory) + target, err := newTarget(tcfg.Type, tcfg.Options, factories.TargetFactory) if err != nil { return fmt.Errorf("error creating log target %s: %w", name, err) } @@ -65,7 +65,7 @@ func ConfigureTargets(lgr *logr.Logr, config map[string]TargetCfg, factories *Fa continue } - formatter, err := newFormatter(tcfg.Format, tcfg.FormatOptions, factories.formatterFactory) + formatter, err := newFormatter(tcfg.Format, tcfg.FormatOptions, factories.FormatterFactory) if 
err != nil { return fmt.Errorf("error creating formatter for log target %s: %w", name, err) } diff --git a/vendor/github.com/mattermost/logr/v2/field.go b/vendor/github.com/mattermost/logr/v2/field.go index 5725d0a1..33342870 100644 --- a/vendor/github.com/mattermost/logr/v2/field.go +++ b/vendor/github.com/mattermost/logr/v2/field.go @@ -15,7 +15,7 @@ var ( Space = []byte{' '} Newline = []byte{'\n'} Quote = []byte{'"'} - Colon = []byte{'"'} + Colon = []byte{':'} ) // LogCloner is implemented by `Any` types that require a clone to be provided diff --git a/vendor/github.com/mattermost/logr/v2/filterstd.go b/vendor/github.com/mattermost/logr/v2/filterstd.go index 7f38a332..fe917fe5 100644 --- a/vendor/github.com/mattermost/logr/v2/filterstd.go +++ b/vendor/github.com/mattermost/logr/v2/filterstd.go @@ -11,6 +11,7 @@ type StdFilter struct { // is enabled for this filter. func (lt StdFilter) GetEnabledLevel(level Level) (Level, bool) { enabled := level.ID <= lt.Lvl.ID + stackTrace := level.ID <= lt.Stacktrace.ID var levelEnabled Level if enabled { @@ -33,6 +34,11 @@ func (lt StdFilter) GetEnabledLevel(level Level) (Level, bool) { levelEnabled = level } } + + if stackTrace { + levelEnabled.Stacktrace = true + } + return levelEnabled, enabled } diff --git a/vendor/github.com/mattermost/logr/v2/sugar.go b/vendor/github.com/mattermost/logr/v2/sugar.go index f4f300ee..882f0fd5 100644 --- a/vendor/github.com/mattermost/logr/v2/sugar.go +++ b/vendor/github.com/mattermost/logr/v2/sugar.go @@ -117,3 +117,81 @@ func (s Sugar) Fatalf(format string, args ...interface{}) { func (s Sugar) Panicf(format string, args ...interface{}) { s.Logf(Panic, format, args...) } + +// +// K/V style +// + +// With returns a new Sugar logger with the specified key/value pairs added to the +// fields list. +func (s Sugar) With(keyValuePairs ...interface{}) Sugar { + return s.logger.With(s.argsToFields(keyValuePairs)...).Sugar() +} + +// Tracew outputs at trace level with the specified key/value pairs converted to fields. +func (s Sugar) Tracew(msg string, keyValuePairs ...interface{}) { + s.logger.Log(Trace, msg, s.argsToFields(keyValuePairs)...) +} + +// Debugw outputs at debug level with the specified key/value pairs converted to fields. +func (s Sugar) Debugw(msg string, keyValuePairs ...interface{}) { + s.logger.Log(Debug, msg, s.argsToFields(keyValuePairs)...) +} + +// Infow outputs at info level with the specified key/value pairs converted to fields. +func (s Sugar) Infow(msg string, keyValuePairs ...interface{}) { + s.logger.Log(Info, msg, s.argsToFields(keyValuePairs)...) +} + +// Warnw outputs at warn level with the specified key/value pairs converted to fields. +func (s Sugar) Warnw(msg string, keyValuePairs ...interface{}) { + s.logger.Log(Warn, msg, s.argsToFields(keyValuePairs)...) +} + +// Errorw outputs at error level with the specified key/value pairs converted to fields. +func (s Sugar) Errorw(msg string, keyValuePairs ...interface{}) { + s.logger.Log(Error, msg, s.argsToFields(keyValuePairs)...) +} + +// Fatalw outputs at fatal level with the specified key/value pairs converted to fields. +func (s Sugar) Fatalw(msg string, keyValuePairs ...interface{}) { + s.logger.Log(Fatal, msg, s.argsToFields(keyValuePairs)...) +} + +// Panicw outputs at panic level with the specified key/value pairs converted to fields. +func (s Sugar) Panicw(msg string, keyValuePairs ...interface{}) { + s.logger.Log(Panic, msg, s.argsToFields(keyValuePairs)...) 
+} + +// argsToFields converts an array of args, possibly containing name/value pairs +// into a []Field. +func (s Sugar) argsToFields(keyValuePairs []interface{}) []Field { + if len(keyValuePairs) == 0 { + return nil + } + + fields := make([]Field, 0, len(keyValuePairs)) + count := len(keyValuePairs) + + for i := 0; i < count; { + if fld, ok := keyValuePairs[i].(Field); ok { + fields = append(fields, fld) + i++ + continue + } + + if i == count-1 { + s.logger.Error("invalid key/value pair", Any("arg", keyValuePairs[i])) + break + } + + // we should have a key/value pair now. The key must be a string. + if key, ok := keyValuePairs[i].(string); !ok { + s.logger.Error("invalid key for key/value pair", Int("pos", i)) + } else { + fields = append(fields, Any(key, keyValuePairs[i+1])) + } + i += 2 + } + return fields +} diff --git a/vendor/github.com/mattermost/logr/v2/targets/testing.go b/vendor/github.com/mattermost/logr/v2/targets/testing.go new file mode 100644 index 00000000..ea3df70c --- /dev/null +++ b/vendor/github.com/mattermost/logr/v2/targets/testing.go @@ -0,0 +1,72 @@ +package targets + +import ( + "strings" + "sync" + "testing" + + "github.com/mattermost/logr/v2" + "github.com/mattermost/logr/v2/formatters" +) + +// Testing is a simple log target that writes to a (*testing.T) log. +type Testing struct { + mux sync.Mutex + t *testing.T +} + +func NewTestingTarget(t *testing.T) *Testing { + return &Testing{ + t: t, + } +} + +// Init is called once to initialize the target. +func (tt *Testing) Init() error { + return nil +} + +// Write outputs bytes to this file target. +func (tt *Testing) Write(p []byte, rec *logr.LogRec) (int, error) { + tt.mux.Lock() + defer tt.mux.Unlock() + + if tt.t != nil { + s := strings.TrimSpace(string(p)) + tt.t.Log(s) + } + return len(p), nil +} + +// Shutdown is called once to free/close any resources. +// Target queue is already drained when this is called. +func (tt *Testing) Shutdown() error { + tt.mux.Lock() + defer tt.mux.Unlock() + + tt.t = nil + return nil +} + +// CreateTestLogger creates a logger for unit tests. Log records are output to `(*testing.T).Log`. +// A new logger is returned along with a method to shutdown the new logger. +func CreateTestLogger(t *testing.T, levels ...logr.Level) (logger logr.Logger, shutdown func() error) { + lgr, _ := logr.New() + filter := logr.NewCustomFilter(levels...) 
+ formatter := &formatters.Plain{EnableCaller: true} + target := NewTestingTarget(t) + + if err := lgr.AddTarget(target, "test", filter, formatter, 1000); err != nil { + t.Fail() + } + shutdown = func() error { + err := lgr.Shutdown() + if err != nil { + target.mux.Lock() + target.t.Error("error shutting down test logger", err) + target.mux.Unlock() + } + return err + } + return lgr.NewLogger(), shutdown +} diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/auditconv.go b/vendor/github.com/mattermost/mattermost-server/v6/model/auditconv.go index 8de21124..1bcb2363 100644 --- a/vendor/github.com/mattermost/mattermost-server/v6/model/auditconv.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/auditconv.go @@ -4,6 +4,8 @@ package model import ( + "strings" + "github.com/francoispqt/gojay" ) @@ -268,7 +270,10 @@ func newAuditCommandArgs(ca *CommandArgs) auditCommandArgs { cmdargs.ChannelID = ca.ChannelId cmdargs.TeamID = ca.TeamId cmdargs.TriggerID = ca.TriggerId - cmdargs.Command = ca.Command + cmdFields := strings.Fields(ca.Command) + if len(cmdFields) > 0 { + cmdargs.Command = cmdFields[0] + } } return cmdargs } diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/bot.go b/vendor/github.com/mattermost/mattermost-server/v6/model/bot.go index fe9b9078..7b581089 100644 --- a/vendor/github.com/mattermost/mattermost-server/v6/model/bot.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/bot.go @@ -63,12 +63,8 @@ func (b *Bot) Clone() *Bot { return © } -// IsValid validates the bot and returns an error if it isn't configured correctly. -func (b *Bot) IsValid() *AppError { - if !IsValidId(b.UserId) { - return NewAppError("Bot.IsValid", "model.bot.is_valid.user_id.app_error", b.Trace(), "", http.StatusBadRequest) - } - +// IsValidCreate validates bot for Create call. This skips validations of fields that are auto-filled on Create +func (b *Bot) IsValidCreate() *AppError { if !IsValidUsername(b.Username) { return NewAppError("Bot.IsValid", "model.bot.is_valid.username.app_error", b.Trace(), "", http.StatusBadRequest) } @@ -85,6 +81,15 @@ func (b *Bot) IsValid() *AppError { return NewAppError("Bot.IsValid", "model.bot.is_valid.creator_id.app_error", b.Trace(), "", http.StatusBadRequest) } + return nil +} + +// IsValid validates the bot and returns an error if it isn't configured correctly. +func (b *Bot) IsValid() *AppError { + if !IsValidId(b.UserId) { + return NewAppError("Bot.IsValid", "model.bot.is_valid.user_id.app_error", b.Trace(), "", http.StatusBadRequest) + } + if b.CreateAt == 0 { return NewAppError("Bot.IsValid", "model.bot.is_valid.create_at.app_error", b.Trace(), "", http.StatusBadRequest) } @@ -92,8 +97,7 @@ func (b *Bot) IsValid() *AppError { if b.UpdateAt == 0 { return NewAppError("Bot.IsValid", "model.bot.is_valid.update_at.app_error", b.Trace(), "", http.StatusBadRequest) } - - return nil + return b.IsValidCreate() } // PreSave should be run before saving a new bot to the database. 
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/channel.go b/vendor/github.com/mattermost/mattermost-server/v6/model/channel.go index 75fc2680..20604700 100644 --- a/vendor/github.com/mattermost/mattermost-server/v6/model/channel.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/channel.go @@ -56,6 +56,7 @@ type Channel struct { Shared *bool `json:"shared"` TotalMsgCountRoot int64 `json:"total_msg_count_root"` PolicyID *string `json:"policy_id" db:"-"` + LastRootPostAt int64 `json:"last_root_post_at"` } type ChannelWithTeamData struct { diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/channel_member.go b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_member.go index 82bc84ae..d0bfc4a5 100644 --- a/vendor/github.com/mattermost/mattermost-server/v6/model/channel_member.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_member.go @@ -69,7 +69,6 @@ type ChannelMemberForExport struct { } func (o *ChannelMember) IsValid() *AppError { - if !IsValidId(o.ChannelId) { return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest) } @@ -106,6 +105,11 @@ func (o *ChannelMember) IsValid() *AppError { } } + if len(o.Roles) > UserRolesMaxLength { + return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.roles_limit.app_error", + map[string]interface{}{"Limit": UserRolesMaxLength}, "", http.StatusBadRequest) + } + return nil } diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/client4.go b/vendor/github.com/mattermost/mattermost-server/v6/model/client4.go index 615b1264..7bbc98e7 100644 --- a/vendor/github.com/mattermost/mattermost-server/v6/model/client4.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/client4.go @@ -3899,7 +3899,13 @@ func (c *Client4) SearchPostsWithParams(teamId string, params *SearchParameter) if jsonErr != nil { return nil, nil, NewAppError("SearchFilesWithParams", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError) } - r, err := c.DoAPIPost(c.teamRoute(teamId)+"/posts/search", string(js)) + var route string + if teamId == "" { + route = c.postsRoute() + "/search" + } else { + route = c.teamRoute(teamId) + "/posts/search" + } + r, err := c.DoAPIPost(route, string(js)) if err != nil { return nil, BuildResponse(r), err } @@ -3917,7 +3923,13 @@ func (c *Client4) SearchPostsWithParams(teamId string, params *SearchParameter) // SearchPostsWithMatches returns any posts with matching terms string, including. 
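The client4.go change above (and the matching one in SearchPostsWithMatches just below) makes an empty teamId fall back to the global /posts/search endpoint instead of a team-scoped route. A hedged sketch of a cross-team search using the signature shown in this hunk; server URL and token are placeholders:

```go
package example

import "github.com/mattermost/mattermost-server/v6/model"

// searchAllTeams passes an empty teamId, which with the change above routes
// the request to POST /api/v4/posts/search rather than a per-team route.
func searchAllTeams(serverURL, token, terms string) (*model.PostSearchResults, error) {
	client := model.NewAPIv4Client(serverURL)
	client.SetToken(token)

	results, _, err := client.SearchPostsWithMatches("", terms, false)
	return results, err
}
```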
func (c *Client4) SearchPostsWithMatches(teamId string, terms string, isOrSearch bool) (*PostSearchResults, *Response, error) { requestBody := map[string]interface{}{"terms": terms, "is_or_search": isOrSearch} - r, err := c.DoAPIPost(c.teamRoute(teamId)+"/posts/search", StringInterfaceToJSON(requestBody)) + var route string + if teamId == "" { + route = c.postsRoute() + "/search" + } else { + route = c.teamRoute(teamId) + "/posts/search" + } + r, err := c.DoAPIPost(route, StringInterfaceToJSON(requestBody)) if err != nil { return nil, BuildResponse(r), err } diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/cloud.go b/vendor/github.com/mattermost/mattermost-server/v6/model/cloud.go index ffd85a2a..7c63c138 100644 --- a/vendor/github.com/mattermost/mattermost-server/v6/model/cloud.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/cloud.go @@ -20,8 +20,9 @@ var MockCWS string type BillingScheme string const ( - BillingSchemePerSeat = BillingScheme("per_seat") - BillingSchemeFlatFee = BillingScheme("flat_fee") + BillingSchemePerSeat = BillingScheme("per_seat") + BillingSchemeFlatFee = BillingScheme("flat_fee") + BillingSchemeSalesServe = BillingScheme("sales_serve") ) type RecurringInterval string @@ -104,7 +105,7 @@ type Address struct { // PaymentMethod represents methods of payment for a customer. type PaymentMethod struct { Type string `json:"type"` - LastFour int `json:"last_four"` + LastFour string `json:"last_four"` ExpMonth int `json:"exp_month"` ExpYear int `json:"exp_year"` CardBrand string `json:"card_brand"` @@ -169,7 +170,7 @@ type CWSWebhookPayload struct { type FailedPayment struct { CardBrand string `json:"card_brand"` - LastFour int `json:"last_four"` + LastFour string `json:"last_four"` FailureMessage string `json:"failure_message"` } diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/config.go b/vendor/github.com/mattermost/mattermost-server/v6/model/config.go index 39cdb893..a2eda9ff 100644 --- a/vendor/github.com/mattermost/mattermost-server/v6/model/config.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/config.go @@ -352,6 +352,7 @@ type ServiceSettings struct { EnableBotAccountCreation *bool `access:"integrations_bot_accounts"` EnableSVGs *bool `access:"site_posts"` EnableLatex *bool `access:"site_posts"` + EnableInlineLatex *bool `access:"site_posts"` EnableAPIChannelDeletion *bool EnableLocalMode *bool LocalModeSocketLocation *string // telemetry: none @@ -736,6 +737,10 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) { } } + if s.EnableInlineLatex == nil { + s.EnableInlineLatex = NewBool(true) + } + if s.EnableLocalMode == nil { s.EnableLocalMode = NewBool(false) } @@ -2610,8 +2615,9 @@ func (s *DataRetentionSettings) SetDefaults() { } type JobSettings struct { - RunJobs *bool `access:"write_restrictable,cloud_restrictable"` - RunScheduler *bool `access:"write_restrictable,cloud_restrictable"` + RunJobs *bool `access:"write_restrictable,cloud_restrictable"` + RunScheduler *bool `access:"write_restrictable,cloud_restrictable"` + CleanupJobsThresholdDays *int `access:"write_restrictable,cloud_restrictable"` } func (s *JobSettings) SetDefaults() { @@ -2622,6 +2628,10 @@ func (s *JobSettings) SetDefaults() { if s.RunScheduler == nil { s.RunScheduler = NewBool(true) } + + if s.CleanupJobsThresholdDays == nil { + s.CleanupJobsThresholdDays = NewInt(-1) + } } type CloudSettings struct { @@ -3736,9 +3746,11 @@ func (o *Config) Sanitize() { *o.LdapSettings.BindPassword = FakeSetting } - 
*o.FileSettings.PublicLinkSalt = FakeSetting + if o.FileSettings.PublicLinkSalt != nil { + *o.FileSettings.PublicLinkSalt = FakeSetting + } - if *o.FileSettings.AmazonS3SecretAccessKey != "" { + if o.FileSettings.AmazonS3SecretAccessKey != nil && *o.FileSettings.AmazonS3SecretAccessKey != "" { *o.FileSettings.AmazonS3SecretAccessKey = FakeSetting } @@ -3746,7 +3758,7 @@ func (o *Config) Sanitize() { *o.EmailSettings.SMTPPassword = FakeSetting } - if *o.GitLabSettings.Secret != "" { + if o.GitLabSettings.Secret != nil && *o.GitLabSettings.Secret != "" { *o.GitLabSettings.Secret = FakeSetting } @@ -3762,10 +3774,17 @@ func (o *Config) Sanitize() { *o.OpenIdSettings.Secret = FakeSetting } - *o.SqlSettings.DataSource = FakeSetting - *o.SqlSettings.AtRestEncryptKey = FakeSetting + if o.SqlSettings.DataSource != nil { + *o.SqlSettings.DataSource = FakeSetting + } - *o.ElasticsearchSettings.Password = FakeSetting + if o.SqlSettings.AtRestEncryptKey != nil { + *o.SqlSettings.AtRestEncryptKey = FakeSetting + } + + if o.ElasticsearchSettings.Password != nil { + *o.ElasticsearchSettings.Password = FakeSetting + } for i := range o.SqlSettings.DataSourceReplicas { o.SqlSettings.DataSourceReplicas[i] = FakeSetting @@ -3775,7 +3794,9 @@ func (o *Config) Sanitize() { o.SqlSettings.DataSourceSearchReplicas[i] = FakeSetting } - if o.MessageExportSettings.GlobalRelaySettings.SMTPPassword != nil && *o.MessageExportSettings.GlobalRelaySettings.SMTPPassword != "" { + if o.MessageExportSettings.GlobalRelaySettings != nil && + o.MessageExportSettings.GlobalRelaySettings.SMTPPassword != nil && + *o.MessageExportSettings.GlobalRelaySettings.SMTPPassword != "" { *o.MessageExportSettings.GlobalRelaySettings.SMTPPassword = FakeSetting } @@ -3783,7 +3804,9 @@ func (o *Config) Sanitize() { *o.ServiceSettings.GfycatAPISecret = FakeSetting } - *o.ServiceSettings.SplitKey = FakeSetting + if o.ServiceSettings.SplitKey != nil { + *o.ServiceSettings.SplitKey = FakeSetting + } } // structToMapFilteredByTag converts a struct into a map removing those fields that has the tag passed diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/feature_flags.go b/vendor/github.com/mattermost/mattermost-server/v6/model/feature_flags.go index a341c254..81b62172 100644 --- a/vendor/github.com/mattermost/mattermost-server/v6/model/feature_flags.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/feature_flags.go @@ -33,9 +33,6 @@ type FeatureFlags struct { PluginApps string `plugin_id:"com.mattermost.apps"` PluginFocalboard string `plugin_id:"focalboard"` - // Enable timed dnd support for user status - TimedDND bool - PermalinkPreviews bool // Enable the Global Header @@ -43,6 +40,23 @@ type FeatureFlags struct { // Enable different team menu button treatments, possible values = ("none", "by_team_name", "inverted_sidebar_bg_color") AddChannelButton string + + // Enable different treatments for first time users, possible values = ("none", "tour_point", "around_input") + PrewrittenMessages string + + // Enable different treatments for first time users, possible values = ("none", "tips_and_next_steps") + DownloadAppsCTA string + + // Determine whether when a user gets created, they'll have noisy notifications e.g. 
Send desktop notifications for all activity + NewAccountNoisy bool + // Enable Boards Unfurl Preview + BoardsUnfurl bool + + // Enable Calls plugin support in the mobile app + CallsMobile bool + + // Start A/B tour tips automatically, possible values = ("none", "auto") + AutoTour string } func (f *FeatureFlags) SetDefaults() { @@ -54,10 +68,15 @@ func (f *FeatureFlags) SetDefaults() { f.AppsEnabled = false f.PluginApps = "" f.PluginFocalboard = "" - f.TimedDND = false f.PermalinkPreviews = true f.GlobalHeader = true f.AddChannelButton = "by_team_name" + f.PrewrittenMessages = "tour_point" + f.DownloadAppsCTA = "tips_and_next_steps" + f.NewAccountNoisy = false + f.BoardsUnfurl = true + f.CallsMobile = false + f.AutoTour = "none" } func (f *FeatureFlags) Plugins() map[string]string { diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/integration_action.go b/vendor/github.com/mattermost/mattermost-server/v6/model/integration_action.go index c61cc6d4..4c645d02 100644 --- a/vendor/github.com/mattermost/mattermost-server/v6/model/integration_action.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/integration_action.go @@ -115,6 +115,14 @@ func (p *PostAction) Equals(input *PostAction) bool { } // Compare PostActionIntegration + + // If input is nil, then return true if original is also nil. + // Else return false. + if input.Integration == nil { + return p.Integration == nil + } + + // Both are unequal and not nil. if p.Integration.URL != input.Integration.URL { return false } diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/post_embed.go b/vendor/github.com/mattermost/mattermost-server/v6/model/post_embed.go index ea3e2c5b..b72ae6e1 100644 --- a/vendor/github.com/mattermost/mattermost-server/v6/model/post_embed.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/post_embed.go @@ -9,6 +9,7 @@ const ( PostEmbedOpengraph PostEmbedType = "opengraph" PostEmbedLink PostEmbedType = "link" PostEmbedPermalink PostEmbedType = "permalink" + PostEmbedBoards PostEmbedType = "boards" ) type PostEmbedType string diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/session.go b/vendor/github.com/mattermost/mattermost-server/v6/model/session.go index d3bbc6e4..72f8d646 100644 --- a/vendor/github.com/mattermost/mattermost-server/v6/model/session.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/session.go @@ -4,6 +4,7 @@ package model import ( + "net/http" "strconv" "strings" @@ -78,6 +79,27 @@ func (s *Session) DeepCopy() *Session { return ©Session } +func (s *Session) IsValid() *AppError { + if !IsValidId(s.Id) { + return NewAppError("Session.IsValid", "model.session.is_valid.id.app_error", nil, "", http.StatusBadRequest) + } + + if !IsValidId(s.UserId) { + return NewAppError("Session.IsValid", "model.session.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) + } + + if s.CreateAt == 0 { + return NewAppError("Session.IsValid", "model.session.is_valid.create_at.app_error", nil, "", http.StatusBadRequest) + } + + if len(s.Roles) > UserRolesMaxLength { + return NewAppError("Session.IsValid", "model.session.is_valid.roles_limit.app_error", + map[string]interface{}{"Limit": UserRolesMaxLength}, "session_id="+s.Id, http.StatusBadRequest) + } + + return nil +} + func (s *Session) PreSave() { if s.Id == "" { s.Id = NewId() diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/shared_channel.go b/vendor/github.com/mattermost/mattermost-server/v6/model/shared_channel.go index 08a29292..ed069b28 
100644 --- a/vendor/github.com/mattermost/mattermost-server/v6/model/shared_channel.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/shared_channel.go @@ -238,6 +238,7 @@ func (scf *SharedChannelAttachment) IsValid() *AppError { type SharedChannelFilterOpts struct { TeamId string CreatorId string + MemberId string ExcludeHome bool ExcludeRemote bool } diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/team_member.go b/vendor/github.com/mattermost/mattermost-server/v6/model/team_member.go index 2c928d2d..c0b3772c 100644 --- a/vendor/github.com/mattermost/mattermost-server/v6/model/team_member.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/team_member.go @@ -98,7 +98,6 @@ func TeamMemberWithErrorToString(o *TeamMemberWithError) string { } func (o *TeamMember) IsValid() *AppError { - if !IsValidId(o.TeamId) { return NewAppError("TeamMember.IsValid", "model.team_member.is_valid.team_id.app_error", nil, "", http.StatusBadRequest) } @@ -107,6 +106,11 @@ func (o *TeamMember) IsValid() *AppError { return NewAppError("TeamMember.IsValid", "model.team_member.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) } + if len(o.Roles) > UserRolesMaxLength { + return NewAppError("TeamMember.IsValid", "model.team_member.is_valid.roles_limit.app_error", + map[string]interface{}{"Limit": UserRolesMaxLength}, "", http.StatusBadRequest) + } + return nil } diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/user.go b/vendor/github.com/mattermost/mattermost-server/v6/model/user.go index 2e843ea1..271ccd81 100644 --- a/vendor/github.com/mattermost/mattermost-server/v6/model/user.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/user.go @@ -60,6 +60,7 @@ const ( UserPasswordMaxLength = 72 UserLocaleMaxLength = 5 UserTimezoneMaxRunes = 256 + UserRolesMaxLength = 256 ) //msgp:tuple User @@ -261,7 +262,6 @@ func (u *User) DeepCopy() *User { // IsValid validates the user and returns an error if it isn't configured // correctly. 
func (u *User) IsValid() *AppError { - if !IsValidId(u.Id) { return InvalidUserError("id", "") } @@ -332,6 +332,11 @@ func (u *User) IsValid() *AppError { } } + if len(u.Roles) > UserRolesMaxLength { + return NewAppError("User.IsValid", "model.user.is_valid.roles_limit.app_error", + map[string]interface{}{"Limit": UserRolesMaxLength}, "user_id="+u.Id, http.StatusBadRequest) + } + return nil } diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/utils.go b/vendor/github.com/mattermost/mattermost-server/v6/model/utils.go index fab3f494..c57716f7 100644 --- a/vendor/github.com/mattermost/mattermost-server/v6/model/utils.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/utils.go @@ -6,6 +6,7 @@ package model import ( "bytes" "crypto/rand" + "database/sql/driver" "encoding/base32" "encoding/json" "fmt" @@ -24,6 +25,7 @@ import ( "github.com/mattermost/mattermost-server/v6/shared/i18n" "github.com/pborman/uuid" + "github.com/pkg/errors" ) const ( @@ -72,6 +74,30 @@ func (sa StringArray) Equals(input StringArray) bool { return true } +// Value converts StringArray to database value +func (sa StringArray) Value() (driver.Value, error) { + return json.Marshal(sa) +} + +// Scan converts database column value to StringArray +func (sa *StringArray) Scan(value interface{}) error { + if value == nil { + return nil + } + + buf, ok := value.([]byte) + if ok { + return json.Unmarshal(buf, sa) + } + + str, ok := value.(string) + if ok { + return json.Unmarshal([]byte(str), sa) + } + + return errors.New("received value is neither a byte slice nor string") +} + var translateFunc i18n.TranslateFunc var translateFuncOnce sync.Once diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/version.go b/vendor/github.com/mattermost/mattermost-server/v6/model/version.go index 13170dc5..4e578887 100644 --- a/vendor/github.com/mattermost/mattermost-server/v6/model/version.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/model/version.go @@ -13,8 +13,7 @@ import ( // It should be maintained in chronological order with most current // release at the front of the list. var versions = []string{ - "6.0.2", - "6.0.1", + "6.1.0", "6.0.0", "5.39.0", "5.38.0", diff --git a/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/mlog.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/mlog.go index ac56362c..0f4cc1fe 100644 --- a/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/mlog.go +++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/mlog.go @@ -49,6 +49,9 @@ type LogRec = logr.LogRec type LogCloner = logr.LogCloner type MetricsCollector = logr.MetricsCollector type TargetCfg = logrcfg.TargetCfg +type TargetFactory = logrcfg.TargetFactory +type FormatterFactory = logrcfg.FormatterFactory +type Factories = logrcfg.Factories type Sugar = logr.Sugar // LoggerConfiguration is a map of LogTarget configurations. @@ -179,7 +182,10 @@ func NewLogger(options ...Option) (*Logger, error) { // For each case JSON containing log targets is provided. Target name collisions are resolved // using the following precedence: // cfgFile > cfgEscaped -func (l *Logger) Configure(cfgFile string, cfgEscaped string) error { +// +// An optional set of factories can be provided which will be called to create any target +// types or formatters not built-in. 
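The mlog hunk above re-exports logr's config factories (TargetFactory, FormatterFactory, Factories) and, per the doc comment here and the Configure signature immediately below, threads them through Configure/ConfigureTargets so callers can register target or formatter types that are not built in. A hedged sketch of that wiring; the "my_target" type name and newMyTarget constructor are assumptions for illustration only:

```go
package example

import (
	"encoding/json"
	"fmt"

	"github.com/mattermost/logr/v2"
	"github.com/mattermost/mattermost-server/v6/shared/mlog"
)

// newMyTarget stands in for a user-provided logr.Target constructor.
func newMyTarget(options json.RawMessage) (logr.Target, error) {
	return nil, fmt.Errorf("newMyTarget: not implemented in this sketch")
}

func configureWithCustomTarget(cfgFile string) error {
	logger, err := mlog.NewLogger()
	if err != nil {
		return err
	}

	factories := &mlog.Factories{
		// Consulted for target types the built-in configuration does not know.
		TargetFactory: func(targetType string, options json.RawMessage) (logr.Target, error) {
			if targetType == "my_target" {
				return newMyTarget(options)
			}
			return nil, fmt.Errorf("unknown target type %q", targetType)
		},
	}

	// Configure now accepts the optional factories (see the hunk below).
	return logger.Configure(cfgFile, "", factories)
}
```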
+func (l *Logger) Configure(cfgFile string, cfgEscaped string, factories *Factories) error { if atomic.LoadInt32(l.lockConfig) != 0 { return ErrConfigurationLock } @@ -213,16 +219,18 @@ func (l *Logger) Configure(cfgFile string, cfgEscaped string) error { return nil } - return logrcfg.ConfigureTargets(l.log.Logr(), cfgMap.toTargetCfg(), nil) + return logrcfg.ConfigureTargets(l.log.Logr(), cfgMap.toTargetCfg(), factories) } // ConfigureTargets provides a new configuration for this logger via a `LoggerConfig` map. // Typically `mlog.Configure` is used instead which accepts JSON formatted configuration. -func (l *Logger) ConfigureTargets(cfg LoggerConfiguration) error { +// An optional set of factories can be provided which will be called to create any target +// types or formatters not built-in. +func (l *Logger) ConfigureTargets(cfg LoggerConfiguration, factories *Factories) error { if atomic.LoadInt32(l.lockConfig) != 0 { return ErrConfigurationLock } - return logrcfg.ConfigureTargets(l.log.Logr(), cfg.toTargetCfg(), nil) + return logrcfg.ConfigureTargets(l.log.Logr(), cfg.toTargetCfg(), factories) } // LockConfiguration disallows further configuration changes until `UnlockConfiguration` @@ -405,6 +413,22 @@ func GetPackageName(f string) string { return f } +// ShouldQuote returns true if val contains any characters that might be unsafe +// when injecting log output into an aggregator, viewer or report. +// Returning true means that val should be surrounded by quotation marks before being +// output into logs. +func ShouldQuote(val string) bool { + for _, c := range val { + if !((c >= '0' && c <= '9') || + (c >= 'a' && c <= 'z') || + (c >= 'A' && c <= 'Z') || + c == '-' || c == '.' || c == '_' || c == '/' || c == '@' || c == '^' || c == '+') { + return true + } + } + return false +} + type logWriter struct { logger *Logger } diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml deleted file mode 100644 index 7942c565..00000000 --- a/vendor/github.com/mattn/go-colorable/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -sudo: false -go: - - 1.13.x - - tip - -before_install: - - go get -t -v ./... 
- -script: - - ./go.test.sh - -after_success: - - bash <(curl -s https://codecov.io/bash) - diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md index e055952b..ca048371 100644 --- a/vendor/github.com/mattn/go-colorable/README.md +++ b/vendor/github.com/mattn/go-colorable/README.md @@ -1,6 +1,6 @@ # go-colorable -[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable) +[![Build Status](https://github.com/mattn/go-colorable/workflows/test/badge.svg)](https://github.com/mattn/go-colorable/actions?query=workflow%3Atest) [![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable) [![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) [![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go index 1f7806fe..416d1bbb 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_appengine.go +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine package colorable diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go index 08cbd1e0..766d9460 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_others.go +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -1,5 +1,5 @@ -// +build !windows -// +build !appengine +//go:build !windows && !appengine +// +build !windows,!appengine package colorable diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go index 41215d7f..1846ad5a 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -1,5 +1,5 @@ -// +build windows -// +build !appengine +//go:build windows && !appengine +// +build windows,!appengine package colorable @@ -452,18 +452,22 @@ func (w *Writer) Write(data []byte) (n int, err error) { } else { er = bytes.NewReader(data) } - var bw [1]byte + var plaintext bytes.Buffer loop: for { c1, err := er.ReadByte() if err != nil { + plaintext.WriteTo(w.out) break loop } if c1 != 0x1b { - bw[0] = c1 - w.out.Write(bw[:]) + plaintext.WriteByte(c1) continue } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } c2, err := er.ReadByte() if err != nil { break loop diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go index 95f2c6be..3df68f36 100644 --- a/vendor/github.com/mattn/go-colorable/noncolorable.go +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -18,18 +18,22 @@ func NewNonColorable(w io.Writer) io.Writer { // Write writes data on console func (w *NonColorable) Write(data []byte) (n int, err error) { er := bytes.NewReader(data) - var bw [1]byte + var plaintext bytes.Buffer loop: for { c1, err := er.ReadByte() if err != nil { + plaintext.WriteTo(w.out) break loop } if c1 != 0x1b { - bw[0] = c1 - w.out.Write(bw[:]) + plaintext.WriteByte(c1) continue } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } c2, err := er.ReadByte() if err != nil { break loop diff --git 
a/vendor/github.com/minio/minio-go/v7/README.md b/vendor/github.com/minio/minio-go/v7/README.md index b5c26d53..3ba174f4 100644 --- a/vendor/github.com/minio/minio-go/v7/README.md +++ b/vendor/github.com/minio/minio-go/v7/README.md @@ -1,4 +1,4 @@ -# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE) +# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE) The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage. @@ -171,9 +171,9 @@ The full API Reference is available here. * [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy) ### API Reference : Client custom settings -* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo) -* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn) -* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff) +* [`SetAppInfo`](https://docs.min.io/docs/golang-client-api-reference#SetAppInfo) +* [`TraceOn`](https://docs.min.io/docs/golang-client-api-reference#TraceOn) +* [`TraceOff`](https://docs.min.io/docs/golang-client-api-reference#TraceOff) ## Full Examples @@ -248,4 +248,4 @@ The full API Reference is available here. [Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md) ## License -This SDK is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information. +This SDK is distributed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information. diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go index 41054e13..0b357d3e 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go @@ -27,6 +27,7 @@ import ( "net/url" "time" + "github.com/google/uuid" "github.com/minio/minio-go/v7/pkg/replication" "github.com/minio/minio-go/v7/pkg/s3utils" ) @@ -187,12 +188,39 @@ func (c Client) GetBucketReplicationMetrics(ctx context.Context, bucketName stri return s, nil } +// mustGetUUID - get a random UUID. 
+func mustGetUUID() string { + u, err := uuid.NewRandom() + if err != nil { + return "" + } + return u.String() +} + // ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication // is enabled in the replication config -func (c Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (resetID string, err error) { +func (c Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (rID string, err error) { + rID = mustGetUUID() + _, err = c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, "", rID) + if err != nil { + return rID, err + } + return rID, nil +} + +// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication +// is enabled in the replication config +func (c Client) ResetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string) (rinfo replication.ResyncTargetsInfo, err error) { + rID := mustGetUUID() + return c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, tgtArn, rID) +} + +// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication +// is enabled in the replication config +func (c Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string, resetID string) (rinfo replication.ResyncTargetsInfo, err error) { // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return } // Get resources properly escaped and lined up before // using them in http request. @@ -201,7 +229,10 @@ func (c Client) ResetBucketReplication(ctx context.Context, bucketName string, o if olderThan > 0 { urlValues.Set("older-than", olderThan.String()) } - + if tgtArn != "" { + urlValues.Set("arn", tgtArn) + } + urlValues.Set("reset-id", resetID) // Execute GET on bucket to get replication config. 
resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ bucketName: bucketName, @@ -210,19 +241,19 @@ func (c Client) ResetBucketReplication(ctx context.Context, bucketName string, o defer closeResponse(resp) if err != nil { - return "", err + return rinfo, err } if resp.StatusCode != http.StatusOK { - return "", httpRespToErrorResponse(resp, bucketName, "") + return rinfo, httpRespToErrorResponse(resp, bucketName, "") } respBytes, err := ioutil.ReadAll(resp.Body) if err != nil { - return "", err + return rinfo, err } - if err := json.Unmarshal(respBytes, &resetID); err != nil { - return "", err + if err := json.Unmarshal(respBytes, &rinfo); err != nil { + return rinfo, err } - return resetID, nil + return rinfo, nil } diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go index dd597e46..19a72ac3 100644 --- a/vendor/github.com/minio/minio-go/v7/api-compose-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go @@ -223,6 +223,16 @@ func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuck if dstOpts.Internal.ReplicationRequest { headers.Set(minIOBucketReplicationRequest, "") } + if !dstOpts.Internal.LegalholdTimestamp.IsZero() { + headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano)) + } + if !dstOpts.Internal.RetentionTimestamp.IsZero() { + headers.Set(minIOBucketReplicationObjectRetentionTimestamp, dstOpts.Internal.RetentionTimestamp.Format(time.RFC3339Nano)) + } + if !dstOpts.Internal.TaggingTimestamp.IsZero() { + headers.Set(minIOBucketReplicationTaggingTimestamp, dstOpts.Internal.TaggingTimestamp.Format(time.RFC3339Nano)) + } + if len(dstOpts.UserTags) != 0 { headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags)) } @@ -513,7 +523,7 @@ func (c Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ... // 4. Make final complete-multipart request. uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID, - completeMultipartUpload{Parts: objParts}) + completeMultipartUpload{Parts: objParts}, PutObjectOptions{}) if err != nil { return UploadInfo{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go index 970e1fa5..2f5912f3 100644 --- a/vendor/github.com/minio/minio-go/v7/api-datatypes.go +++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go @@ -64,8 +64,9 @@ func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { // Owner name. type Owner struct { - DisplayName string `json:"name"` - ID string `json:"id"` + XMLName xml.Name `xml:"Owner" json:"owner"` + DisplayName string `xml:"ID" json:"name"` + ID string `xml:"DisplayName" json:"id"` } // UploadInfo contains information about the @@ -85,6 +86,14 @@ type UploadInfo struct { ExpirationRuleID string } +// RestoreInfo contains information of the restore operation of an archived object +type RestoreInfo struct { + // Is the restoring operation is still ongoing + OngoingRestore bool + // When the restored copy of the archived object will be removed + ExpiryTime time.Time +} + // ObjectInfo container for object metadata. type ObjectInfo struct { // An ETag is optionally set to md5sum of an object. In case of multipart objects, @@ -115,14 +124,7 @@ type ObjectInfo struct { Owner Owner // ACL grant. 
- Grant []struct { - Grantee struct { - ID string `xml:"ID"` - DisplayName string `xml:"DisplayName"` - URI string `xml:"URI"` - } `xml:"Grantee"` - Permission string `xml:"Permission"` - } `xml:"Grant"` + Grant []Grant // The class of storage used to store the object. StorageClass string `json:"storageClass"` @@ -144,6 +146,8 @@ type ObjectInfo struct { Expiration time.Time ExpirationRuleID string + Restore *RestoreInfo + // Error Err error `json:"-"` } diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go index afa53079..031aa32e 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go @@ -19,25 +19,36 @@ package minio import ( "context" + "encoding/xml" "net/http" "net/url" ) +// Grantee represents the person being granted permissions. +type Grantee struct { + XMLName xml.Name `xml:"Grantee"` + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName"` + URI string `xml:"URI"` +} + +// Grant holds grant information +type Grant struct { + XMLName xml.Name `xml:"Grant"` + Grantee Grantee + Permission string `xml:"Permission"` +} + +// AccessControlList contains the set of grantees and the permissions assigned to each grantee. +type AccessControlList struct { + XMLName xml.Name `xml:"AccessControlList"` + Grant []Grant + Permission string `xml:"Permission"` +} + type accessControlPolicy struct { - Owner struct { - ID string `xml:"ID"` - DisplayName string `xml:"DisplayName"` - } `xml:"Owner"` - AccessControlList struct { - Grant []struct { - Grantee struct { - ID string `xml:"ID"` - DisplayName string `xml:"DisplayName"` - URI string `xml:"URI"` - } `xml:"Grantee"` - Permission string `xml:"Permission"` - } `xml:"Grant"` - } `xml:"AccessControlList"` + Owner + AccessControlList } // GetObjectACL get object ACLs diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go index 7996c11e..431bae54 100644 --- a/vendor/github.com/minio/minio-go/v7/api-list.go +++ b/vendor/github.com/minio/minio-go/v7/api-list.go @@ -56,14 +56,13 @@ func (c Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) { return listAllMyBucketsResult.Buckets.Bucket, nil } -/// Bucket Read Operations. - -func (c Client) listObjectsV2(ctx context.Context, bucketName, objectPrefix string, recursive, metadata bool, maxKeys int) <-chan ObjectInfo { +/// Bucket List Operations. +func (c Client) listObjectsV2(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { // Allocate new list objects channel. objectStatCh := make(chan ObjectInfo, 1) // Default listing is delimited at "/" delimiter := "/" - if recursive { + if opts.Recursive { // If recursive we do not delimit. delimiter = "" } @@ -81,7 +80,7 @@ func (c Client) listObjectsV2(ctx context.Context, bucketName, objectPrefix stri } // Validate incoming object prefix. - if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { defer close(objectStatCh) objectStatCh <- ObjectInfo{ Err: err, @@ -96,8 +95,8 @@ func (c Client) listObjectsV2(ctx context.Context, bucketName, objectPrefix stri var continuationToken string for { // Get list of objects a maximum of 1000 per request. 
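With `Grant`, `Grantee`, and `AccessControlList` now exported and `ObjectInfo.Grant` typed as `[]Grant`, ACL entries can be inspected without anonymous structs. A hedged fragment, reusing `clnt` and the imports from the first sketch; bucket and object names are placeholders:

```go
objACL, err := clnt.GetObjectACL(context.Background(), "my-bucket", "my-object")
if err != nil {
	log.Fatalln(err)
}
for _, g := range objACL.Grant {
	// Grantee.URI is set for group grants, ID/DisplayName for canonical users.
	log.Printf("grantee=%q uri=%q permission=%s", g.Grantee.DisplayName, g.Grantee.URI, g.Permission)
}
```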
- result, err := c.listObjectsV2Query(ctx, bucketName, objectPrefix, continuationToken, - fetchOwner, metadata, delimiter, maxKeys) + result, err := c.listObjectsV2Query(ctx, bucketName, opts.Prefix, continuationToken, + fetchOwner, opts.WithMetadata, delimiter, opts.StartAfter, opts.MaxKeys, opts.headers) if err != nil { objectStatCh <- ObjectInfo{ Err: err, @@ -148,12 +147,13 @@ func (c Client) listObjectsV2(ctx context.Context, bucketName, objectPrefix stri // You can use the request parameters as selection criteria to return a subset of the objects in a bucket. // request parameters :- // --------- +// ?prefix - Limits the response to keys that begin with the specified prefix. // ?continuation-token - Used to continue iterating over a set of objects +// ?metadata - Specifies if we want metadata for the objects as part of list operation. // ?delimiter - A delimiter is a character you use to group keys. -// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?start-after - Sets a marker to start listing lexically at this key onwards. // ?max-keys - Sets the maximum number of keys returned in the response body. -// ?metadata - Specifies if we want metadata for the objects as part of list operation. -func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, maxkeys int) (ListBucketV2Result, error) { +func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, startAfter string, maxkeys int, headers http.Header) (ListBucketV2Result, error) { // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ListBucketV2Result{}, err @@ -173,6 +173,11 @@ func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix urlValues.Set("metadata", "true") } + // Set this conditionally if asked + if startAfter != "" { + urlValues.Set("start-after", startAfter) + } + // Always set encoding-type in ListObjects V2 urlValues.Set("encoding-type", "url") @@ -202,6 +207,7 @@ func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix bucketName: bucketName, queryValues: urlValues, contentSHA256Hex: emptySHA256Hex, + customHeader: headers, }) defer closeResponse(resp) if err != nil { @@ -246,12 +252,12 @@ func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix return listBucketResult, nil } -func (c Client) listObjects(ctx context.Context, bucketName, objectPrefix string, recursive bool, maxKeys int) <-chan ObjectInfo { +func (c Client) listObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { // Allocate new list objects channel. objectStatCh := make(chan ObjectInfo, 1) // Default listing is delimited at "/" delimiter := "/" - if recursive { + if opts.Recursive { // If recursive we do not delimit. delimiter = "" } @@ -264,7 +270,7 @@ func (c Client) listObjects(ctx context.Context, bucketName, objectPrefix string return objectStatCh } // Validate incoming object prefix. 
- if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { defer close(objectStatCh) objectStatCh <- ObjectInfo{ Err: err, @@ -276,10 +282,10 @@ func (c Client) listObjects(ctx context.Context, bucketName, objectPrefix string go func(objectStatCh chan<- ObjectInfo) { defer close(objectStatCh) - marker := "" + marker := opts.StartAfter for { // Get list of objects a maximum of 1000 per request. - result, err := c.listObjectsQuery(ctx, bucketName, objectPrefix, marker, delimiter, maxKeys) + result, err := c.listObjectsQuery(ctx, bucketName, opts.Prefix, marker, delimiter, opts.MaxKeys, opts.headers) if err != nil { objectStatCh <- ObjectInfo{ Err: err, @@ -326,12 +332,12 @@ func (c Client) listObjects(ctx context.Context, bucketName, objectPrefix string return objectStatCh } -func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix string, recursive bool, maxKeys int) <-chan ObjectInfo { +func (c Client) listObjectVersions(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { // Allocate new list objects channel. resultCh := make(chan ObjectInfo, 1) // Default listing is delimited at "/" delimiter := "/" - if recursive { + if opts.Recursive { // If recursive we do not delimit. delimiter = "" } @@ -346,7 +352,7 @@ func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix strin } // Validate incoming object prefix. - if err := s3utils.CheckValidObjectNamePrefix(prefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { defer close(resultCh) resultCh <- ObjectInfo{ Err: err, @@ -365,7 +371,7 @@ func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix strin for { // Get list of objects a maximum of 1000 per request. - result, err := c.listObjectVersionsQuery(ctx, bucketName, prefix, keyMarker, versionIDMarker, delimiter, maxKeys) + result, err := c.listObjectVersionsQuery(ctx, bucketName, opts.Prefix, keyMarker, versionIDMarker, delimiter, opts.MaxKeys, opts.headers) if err != nil { resultCh <- ObjectInfo{ Err: err, @@ -376,15 +382,14 @@ func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix strin // If contents are available loop through and send over channel. for _, version := range result.Versions { info := ObjectInfo{ - ETag: trimEtag(version.ETag), - Key: version.Key, - LastModified: version.LastModified, - Size: version.Size, - Owner: version.Owner, - StorageClass: version.StorageClass, - IsLatest: version.IsLatest, - VersionID: version.VersionID, - + ETag: trimEtag(version.ETag), + Key: version.Key, + LastModified: version.LastModified, + Size: version.Size, + Owner: version.Owner, + StorageClass: version.StorageClass, + IsLatest: version.IsLatest, + VersionID: version.VersionID, IsDeleteMarker: version.isDeleteMarker, } select { @@ -438,7 +443,7 @@ func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix strin // ?delimiter - A delimiter is a character you use to group keys. // ?prefix - Limits the response to keys that begin with the specified prefix. // ?max-keys - Sets the maximum number of keys returned in the response body. 
-func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, keyMarker, versionIDMarker, delimiter string, maxkeys int) (ListVersionsResult, error) { +func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, keyMarker, versionIDMarker, delimiter string, maxkeys int, headers http.Header) (ListVersionsResult, error) { // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ListVersionsResult{}, err @@ -483,6 +488,7 @@ func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, bucketName: bucketName, queryValues: urlValues, contentSHA256Hex: emptySHA256Hex, + customHeader: headers, }) defer closeResponse(resp) if err != nil { @@ -534,7 +540,7 @@ func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, // ?delimiter - A delimiter is a character you use to group keys. // ?prefix - Limits the response to keys that begin with the specified prefix. // ?max-keys - Sets the maximum number of keys returned in the response body. -func (c Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) { +func (c Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int, headers http.Header) (ListBucketResult, error) { // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ListBucketResult{}, err @@ -571,6 +577,7 @@ func (c Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, bucketName: bucketName, queryValues: urlValues, contentSHA256Hex: emptySHA256Hex, + customHeader: headers, }) defer closeResponse(resp) if err != nil { @@ -626,9 +633,25 @@ type ListObjectsOptions struct { // batch, advanced use-case not useful for most // applications MaxKeys int + // StartAfter start listing lexically at this + // object onwards, this value can also be set + // for Marker when `UseV1` is set to true. + StartAfter string // Use the deprecated list objects V1 API UseV1 bool + + headers http.Header +} + +// Set adds a key value pair to the options. The +// key-value pair will be part of the HTTP GET request +// headers. +func (o *ListObjectsOptions) Set(key, value string) { + if o.headers == nil { + o.headers = make(http.Header) + } + o.headers.Set(key, value) } // ListObjects returns objects list after evaluating the passed options. @@ -640,22 +663,22 @@ type ListObjectsOptions struct { // func (c Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { if opts.WithVersions { - return c.listObjectVersions(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys) + return c.listObjectVersions(ctx, bucketName, opts) } // Use legacy list objects v1 API if opts.UseV1 { - return c.listObjects(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys) + return c.listObjects(ctx, bucketName, opts) } // Check whether this is snowball region, if yes ListObjectsV2 doesn't work, fallback to listObjectsV1. if location, ok := c.bucketLocCache.Get(bucketName); ok { if location == "snowball" { - return c.listObjects(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys) + return c.listObjects(ctx, bucketName, opts) } } - return c.listObjectsV2(ctx, bucketName, opts.Prefix, opts.Recursive, opts.WithMetadata, opts.MaxKeys) + return c.listObjectsV2(ctx, bucketName, opts) } // ListIncompleteUploads - List incompletely uploaded multipart objects. 
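The listing paths now receive the whole `ListObjectsOptions` struct, which picks up `StartAfter` and a `Set` method that attaches extra HTTP headers to every list request. A fragment showing both, reusing `clnt` from the first sketch; the prefix, key, and header value are hypothetical:

```go
opts := minio.ListObjectsOptions{
	Prefix:     "backup/",
	Recursive:  true,
	StartAfter: "backup/2021-06-01.tar.gz", // listing starts lexically after this key
}
// Custom headers set here ride on each underlying list request.
opts.Set("x-amz-expected-bucket-owner", "123456789012")

for object := range clnt.ListObjects(context.Background(), "my-bucket", opts) {
	if object.Err != nil {
		log.Fatalln(object.Err)
	}
	log.Println(object.Key, object.Size)
}
```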
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go index a70d7054..873ec387 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go @@ -176,7 +176,7 @@ func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obje // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) if err != nil { return UploadInfo{}, err } @@ -309,7 +309,7 @@ func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, - complete completeMultipartUpload) (UploadInfo, error) { + complete completeMultipartUpload, opts PutObjectOptions) (UploadInfo, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err @@ -336,6 +336,7 @@ func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectN contentBody: completeMultipartUploadBuffer, contentLength: int64(len(completeMultipartUploadBytes)), contentSHA256Hex: sum256Hex(completeMultipartUploadBytes), + customHeader: opts.Header(), } // Execute POST to complete multipart upload for an objectName. diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go index 39e381e9..f1cc9fbb 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go @@ -231,7 +231,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketNa // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) if err != nil { return UploadInfo{}, err } @@ -358,7 +358,7 @@ func (c Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bu // Sort all completed parts. 
sort.Sort(completedParts(complMultipartUpload.Parts)) - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) if err != nil { return UploadInfo{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go index 247e40a6..f669b7d1 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go @@ -60,6 +60,9 @@ type AdvancedPutOptions struct { ReplicationStatus ReplicationStatus SourceMTime time.Time ReplicationRequest bool + RetentionTimestamp time.Time + TaggingTimestamp time.Time + LegalholdTimestamp time.Time } // PutObjectOptions represents options specified by user for PutObject call @@ -156,6 +159,16 @@ func (opts PutObjectOptions) Header() (header http.Header) { if opts.Internal.ReplicationRequest { header.Set(minIOBucketReplicationRequest, "") } + if !opts.Internal.LegalholdTimestamp.IsZero() { + header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano)) + } + if !opts.Internal.RetentionTimestamp.IsZero() { + header.Set(minIOBucketReplicationObjectRetentionTimestamp, opts.Internal.RetentionTimestamp.Format(time.RFC3339Nano)) + } + if !opts.Internal.TaggingTimestamp.IsZero() { + header.Set(minIOBucketReplicationTaggingTimestamp, opts.Internal.TaggingTimestamp.Format(time.RFC3339Nano)) + } + if len(opts.UserTags) != 0 { header.Set(amzTaggingHeader, s3utils.TagEncode(opts.UserTags)) } @@ -360,7 +373,7 @@ func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) if err != nil { return UploadInfo{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go index f21a72c9..24e4d3f5 100644 --- a/vendor/github.com/minio/minio-go/v7/api-remove.go +++ b/vendor/github.com/minio/minio-go/v7/api-remove.go @@ -29,6 +29,50 @@ import ( "github.com/minio/minio-go/v7/pkg/s3utils" ) +// BucketOptions special headers to purge buckets, only +// useful when endpoint is MinIO +type BucketOptions struct { + ForceDelete bool +} + +// RemoveBucketWithOptions deletes the bucket name. +// +// All objects (including all object versions and delete markers) +// in the bucket will be deleted forcibly if bucket options set +// ForceDelete to 'true'. +func (c Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, opts BucketOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Build headers. + headers := make(http.Header) + if opts.ForceDelete { + headers.Set(minIOForceDelete, "true") + } + + // Execute DELETE on bucket. 
+ resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + contentSHA256Hex: emptySHA256Hex, + customHeader: headers, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Remove the location from cache on a successful delete. + c.bucketLocCache.Delete(bucketName) + return nil +} + // RemoveBucket deletes the bucket name. // // All objects (including all object versions and delete markers). @@ -69,6 +113,7 @@ type AdvancedRemoveOptions struct { // RemoveObjectOptions represents options specified by user for RemoveObject call type RemoveObjectOptions struct { + ForceDelete bool GovernanceBypass bool VersionID string Internal AdvancedRemoveOptions @@ -116,6 +161,9 @@ func (c Client) removeObject(ctx context.Context, bucketName, objectName string, if opts.Internal.ReplicationRequest { headers.Set(minIOBucketReplicationRequest, "") } + if opts.ForceDelete { + headers.Set(minIOForceDelete, "true") + } // Execute DELETE on objectName. resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ bucketName: bucketName, diff --git a/vendor/github.com/minio/minio-go/v7/api-restore.go b/vendor/github.com/minio/minio-go/v7/api-restore.go new file mode 100644 index 00000000..dd7ce7a3 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-restore.go @@ -0,0 +1,182 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2018-2021 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/tags" +) + +// RestoreType represents the restore request type +type RestoreType string + +const ( + // RestoreSelect represents the restore SELECT operation + RestoreSelect = RestoreType("SELECT") +) + +// TierType represents a retrieval tier +type TierType string + +const ( + // TierStandard is the standard retrieval tier + TierStandard = TierType("Standard") + // TierBulk is the bulk retrieval tier + TierBulk = TierType("Bulk") + // TierExpedited is the expedited retrieval tier + TierExpedited = TierType("Expedited") +) + +// GlacierJobParameters represents the retrieval tier parameter +type GlacierJobParameters struct { + Tier TierType +} + +// Encryption contains the type of server-side encryption used during object retrieval +type Encryption struct { + EncryptionType string + KMSContext string + KMSKeyID string `xml:"KMSKeyId"` +} + +// MetadataEntry represents a metadata information of the restored object. 
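Both deletion paths can now carry the MinIO-only `x-minio-force-delete` header. A fragment showing the new `RemoveBucketWithOptions` call and the `ForceDelete` flag on `RemoveObjectOptions`, reusing `clnt` from the first sketch; plain S3 endpoints are expected to ignore or reject the header:

```go
ctx := context.Background()

// Ask the server to force-delete the object (sets x-minio-force-delete on the DELETE).
if err := clnt.RemoveObject(ctx, "my-bucket", "stale-object", minio.RemoveObjectOptions{ForceDelete: true}); err != nil {
	log.Fatalln(err)
}

// Force-delete the bucket together with any remaining objects, versions, and delete markers.
if err := clnt.RemoveBucketWithOptions(ctx, "my-bucket", minio.BucketOptions{ForceDelete: true}); err != nil {
	log.Fatalln(err)
}
```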
+type MetadataEntry struct { + Name string + Value string +} + +// S3 holds properties of the copy of the archived object +type S3 struct { + AccessControlList *AccessControlList `xml:"AccessControlList,omitempty"` + BucketName string + Prefix string + CannedACL *string `xml:"CannedACL,omitempty"` + Encryption *Encryption `xml:"Encryption,omitempty"` + StorageClass *string `xml:"StorageClass,omitempty"` + Tagging *tags.Tags `xml:"Tagging,omitempty"` + UserMetadata *MetadataEntry `xml:"UserMetadata,omitempty"` +} + +// SelectParameters holds the select request parameters +type SelectParameters struct { + XMLName xml.Name `xml:"SelectParameters"` + ExpressionType QueryExpressionType + Expression string + InputSerialization SelectObjectInputSerialization + OutputSerialization SelectObjectOutputSerialization +} + +// OutputLocation holds properties of the copy of the archived object +type OutputLocation struct { + XMLName xml.Name `xml:"OutputLocation"` + S3 S3 `xml:"S3"` +} + +// RestoreRequest holds properties of the restore object request +type RestoreRequest struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ RestoreRequest"` + Type *RestoreType `xml:"Type,omitempty"` + Tier *TierType `xml:"Tier,omitempty"` + Days *int `xml:"Days,omitempty"` + GlacierJobParameters *GlacierJobParameters `xml:"GlacierJobParameters,omitempty"` + Description *string `xml:"Description,omitempty"` + SelectParameters *SelectParameters `xml:"SelectParameters,omitempty"` + OutputLocation *OutputLocation `xml:"OutputLocation,omitempty"` +} + +// SetDays sets the days parameter of the restore request +func (r *RestoreRequest) SetDays(v int) { + r.Days = &v +} + +// SetGlacierJobParameters sets the GlacierJobParameters of the restore request +func (r *RestoreRequest) SetGlacierJobParameters(v GlacierJobParameters) { + r.GlacierJobParameters = &v +} + +// SetType sets the type of the restore request +func (r *RestoreRequest) SetType(v RestoreType) { + r.Type = &v +} + +// SetTier sets the retrieval tier of the restore request +func (r *RestoreRequest) SetTier(v TierType) { + r.Tier = &v +} + +// SetDescription sets the description of the restore request +func (r *RestoreRequest) SetDescription(v string) { + r.Description = &v +} + +// SetSelectParameters sets SelectParameters of the restore select request +func (r *RestoreRequest) SetSelectParameters(v SelectParameters) { + r.SelectParameters = &v +} + +// SetOutputLocation sets the properties of the copy of the archived object +func (r *RestoreRequest) SetOutputLocation(v OutputLocation) { + r.OutputLocation = &v +} + +// RestoreObject is an implementation of https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html AWS S3 API +func (c Client) RestoreObject(ctx context.Context, bucketName, objectName, versionID string, req RestoreRequest) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + restoreRequestBytes, err := xml.Marshal(req) + if err != nil { + return err + } + + urlValues := make(url.Values) + urlValues.Set("restore", "") + if versionID != "" { + urlValues.Set("versionId", versionID) + } + + // Execute POST on bucket/object.
+ resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentMD5Base64: sumMD5Base64(restoreRequestBytes), + contentSHA256Hex: sum256Hex(restoreRequestBytes), + contentBody: bytes.NewReader(restoreRequestBytes), + contentLength: int64(len(restoreRequestBytes)), + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusAccepted { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-select.go b/vendor/github.com/minio/minio-go/v7/api-select.go index e35cf02b..c5e6d309 100644 --- a/vendor/github.com/minio/minio-go/v7/api-select.go +++ b/vendor/github.com/minio/minio-go/v7/api-select.go @@ -54,6 +54,13 @@ const ( SelectCompressionNONE SelectCompressionType = "NONE" SelectCompressionGZIP = "GZIP" SelectCompressionBZIP = "BZIP2" + + // Non-standard compression schemes, supported by MinIO hosts: + + SelectCompressionZSTD = "ZSTD" // Zstandard compression. + SelectCompressionLZ4 = "LZ4" // LZ4 Stream + SelectCompressionS2 = "S2" // S2 Stream + SelectCompressionSNAPPY = "SNAPPY" // Snappy stream ) // CSVQuoteFields - is the parameter for how CSV fields are quoted. @@ -330,10 +337,10 @@ func (j JSONOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) er // SelectObjectInputSerialization - input serialization parameters type SelectObjectInputSerialization struct { - CompressionType SelectCompressionType - Parquet *ParquetInputOptions `xml:"Parquet,omitempty"` - CSV *CSVInputOptions `xml:"CSV,omitempty"` - JSON *JSONInputOptions `xml:"JSON,omitempty"` + CompressionType SelectCompressionType `xml:"CompressionType,omitempty"` + Parquet *ParquetInputOptions `xml:"Parquet,omitempty"` + CSV *CSVInputOptions `xml:"CSV,omitempty"` + JSON *JSONInputOptions `xml:"JSON,omitempty"` } // SelectObjectOutputSerialization - output serialization parameters. diff --git a/vendor/github.com/minio/minio-go/v7/api-stat.go b/vendor/github.com/minio/minio-go/v7/api-stat.go index aa81cc43..12a1bf93 100644 --- a/vendor/github.com/minio/minio-go/v7/api-stat.go +++ b/vendor/github.com/minio/minio-go/v7/api-stat.go @@ -99,11 +99,11 @@ func (c Client) statObject(ctx context.Context, bucketName, objectName string, o if err != nil { return ObjectInfo{}, err } - deleteMarker := resp.Header.Get(amzDeleteMarker) == "true" if resp != nil { + deleteMarker := resp.Header.Get(amzDeleteMarker) == "true" if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { - if resp.StatusCode == http.StatusBadRequest && opts.VersionID != "" && deleteMarker { + if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker { errResp := ErrorResponse{ StatusCode: resp.StatusCode, Code: "MethodNotAllowed", diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index 44660ab6..c8550ef1 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -34,6 +34,7 @@ import ( "runtime" "strings" "sync" + "sync/atomic" "time" md5simd "github.com/minio/md5-simd" @@ -90,6 +91,10 @@ type Client struct { // Factory for MD5 hash functions. md5Hasher func() md5simd.Hasher sha256Hasher func() md5simd.Hasher + + healthCheckCh chan struct{} + healthCheck int32 + lastOnline time.Time } // Options for New method @@ -108,7 +113,7 @@ type Options struct { // Global constants. 
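A fragment showing the new `RestoreObject` API together with the `Restore` field added to `ObjectInfo`, reusing `clnt` from the first sketch; the bucket, object, and tier choice are placeholders, and an empty version ID targets the latest version. This is a sketch under those assumptions, not a definitive recipe.

```go
ctx := context.Background()

// Request a 2-day restore of an archived object at the Standard tier.
restoreReq := minio.RestoreRequest{}
restoreReq.SetDays(2)
restoreReq.SetGlacierJobParameters(minio.GlacierJobParameters{Tier: minio.TierStandard})

if err := clnt.RestoreObject(ctx, "my-bucket", "archived-object", "", restoreReq); err != nil {
	log.Fatalln(err)
}

// Poll restore progress via StatObject; the new Restore field is filled from
// the x-amz-restore header when the server returns one.
objInfo, err := clnt.StatObject(ctx, "my-bucket", "archived-object", minio.StatObjectOptions{})
if err != nil {
	log.Fatalln(err)
}
if objInfo.Restore != nil {
	log.Println("ongoing:", objInfo.Restore.OngoingRestore, "expires:", objInfo.Restore.ExpiryTime)
}
```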
const ( libraryName = "minio-go" - libraryVersion = "v7.0.11" + libraryVersion = "v7.0.14" ) // User Agent should always following the below style. @@ -305,6 +310,10 @@ func privateNew(endpoint string, opts *Options) (*Client, error) { // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints. clnt.lookup = opts.BucketLookup + + // healthcheck is not initialized + clnt.healthCheck = unknown + // Return. return clnt, nil } @@ -387,6 +396,72 @@ func (c *Client) hashMaterials(isMd5Requested bool) (hashAlgos map[string]md5sim return hashAlgos, hashSums } +const ( + unknown = -1 + offline = 0 + online = 1 +) + +// IsOnline returns true if healthcheck enabled and client is online +func (c *Client) IsOnline() bool { + switch atomic.LoadInt32(&c.healthCheck) { + case online, unknown: + return true + } + return false +} + +// IsOffline returns true if healthcheck enabled and client is offline +func (c *Client) IsOffline() bool { + return !c.IsOnline() +} + +// HealthCheck starts a healthcheck to see if endpoint is up. Returns a context cancellation function +// and an error if health check is already started +func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, error) { + if atomic.LoadInt32(&c.healthCheck) == online { + return nil, fmt.Errorf("health check running already") + } + if hcDuration < 1*time.Second { + return nil, fmt.Errorf("health check duration should be at least 1 second") + } + ctx, cancelFn := context.WithCancel(context.Background()) + c.healthCheckCh = make(chan struct{}) + atomic.StoreInt32(&c.healthCheck, online) + probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-health-") + go func(duration time.Duration) { + timer := time.NewTimer(duration) + defer timer.Stop() + for { + select { + case <-ctx.Done(): + close(c.healthCheckCh) + atomic.StoreInt32(&c.healthCheck, unknown) + return + case <-timer.C: + + timer.Reset(duration) + // Do health check the first time and ONLY if the connection is marked offline + if c.IsOffline() || c.lastOnline.IsZero() { + _, err := c.getBucketLocation(context.Background(), probeBucketName) + if err != nil && IsNetworkOrHostDown(err, false) { + atomic.StoreInt32(&c.healthCheck, offline) + } + switch ToErrorResponse(err).Code { + case "NoSuchBucket", "AccessDenied", "": + c.lastOnline = time.Now() + atomic.StoreInt32(&c.healthCheck, online) + } + } + case <-c.healthCheckCh: + // set offline if client saw a network error + atomic.StoreInt32(&c.healthCheck, offline) + } + } + }(hcDuration) + return cancelFn, nil +} + // requestMetadata - is container for all the values to make a request. type requestMetadata struct { // If set newRequest presigns the URL. @@ -565,12 +640,25 @@ func (c Client) executeMethod(ctx context.Context, method string, metadata reque if isS3CodeRetryable(errResponse.Code) { continue // Retry. } + + if atomic.LoadInt32(&c.healthCheck) != unknown && IsNetworkOrHostDown(err, false) { + select { + case c.healthCheckCh <- struct{}{}: + default: + } + } return nil, err } - // Initiate the request.
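A fragment showing the new endpoint health check, reusing `clnt` from the first sketch; the probe interval and the sleep are arbitrary values for illustration, the interval only has to be at least one second:

```go
cancelHC, err := clnt.HealthCheck(2 * time.Second)
if err != nil {
	log.Fatalln(err)
}
defer cancelHC() // stops the background probe goroutine

time.Sleep(5 * time.Second) // let a few probes run (illustrative only)
if clnt.IsOnline() {
	log.Println("endpoint is reachable")
} else {
	log.Println("endpoint marked offline")
}
```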
res, err = c.do(req) if err != nil { + if atomic.LoadInt32(&c.healthCheck) != unknown && IsNetworkOrHostDown(err, false) { + select { + case c.healthCheckCh <- struct{}{}: + default: + } + } + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return nil, err } diff --git a/vendor/github.com/minio/minio-go/v7/constants.go b/vendor/github.com/minio/minio-go/v7/constants.go index 2a2e6a0d..7caa42d9 100644 --- a/vendor/github.com/minio/minio-go/v7/constants.go +++ b/vendor/github.com/minio/minio-go/v7/constants.go @@ -69,6 +69,7 @@ const ( amzVersionID = "X-Amz-Version-Id" amzTaggingCount = "X-Amz-Tagging-Count" amzExpiration = "X-Amz-Expiration" + amzRestore = "X-Amz-Restore" amzReplicationStatus = "X-Amz-Replication-Status" amzDeleteMarker = "X-Amz-Delete-Marker" @@ -89,4 +90,12 @@ const ( minIOBucketReplicationDeleteMarker = "X-Minio-Source-DeleteMarker" minIOBucketReplicationProxyRequest = "X-Minio-Source-Proxy-Request" minIOBucketReplicationRequest = "X-Minio-Source-Replication-Request" + // Header indicates last tag update time on source + minIOBucketReplicationTaggingTimestamp = "X-Minio-Source-Replication-Tagging-Timestamp" + // Header indicates last retention update time on source + minIOBucketReplicationObjectRetentionTimestamp = "X-Minio-Source-Replication-Retention-Timestamp" + // Header indicates last legalhold update time on source + minIOBucketReplicationObjectLegalHoldTimestamp = "X-Minio-Source-Replication-LegalHold-Timestamp" + + minIOForceDelete = "x-minio-force-delete" ) diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go index 2bf4edf0..7bef7497 100644 --- a/vendor/github.com/minio/minio-go/v7/core.go +++ b/vendor/github.com/minio/minio-go/v7/core.go @@ -46,13 +46,13 @@ func NewCore(endpoint string, opts *Options) (*Core, error) { // ListObjects - List all the objects at a prefix, optionally with marker and delimiter // you can further filter the results. func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) { - return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys) + return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys, nil) } // ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses // continuationToken instead of marker to support iteration over the results. -func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) { - return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, fetchOwner, false, delimiter, maxkeys) +func (c Core) ListObjectsV2(bucketName, objectPrefix, startAfter, continuationToken, delimiter string, maxkeys int) (ListBucketV2Result, error) { + return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, true, false, delimiter, startAfter, maxkeys, nil) } // CopyObject - copies an object from source object to destination object on server side. @@ -97,10 +97,10 @@ func (c Core) ListObjectParts(ctx context.Context, bucket, object, uploadID stri } // CompleteMultipartUpload - Concatenate uploaded parts and commit to an object. 
-func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart) (string, error) { +func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart, opts PutObjectOptions) (string, error) { res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{ Parts: parts, - }) + }, opts) return res.ETag, err } diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go index ba7ff577..7a168993 100644 --- a/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -38,6 +38,7 @@ import ( "sort" "strconv" "strings" + "sync" "time" "github.com/dustin/go-humanize" @@ -1054,6 +1055,153 @@ func testGetObjectWithVersioning() { successLogger(testName, function, args, startTime).Info() } +func testPutObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + const n = 10 + // Read input... + + // Save the data concurrently. 
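`Core.CompleteMultipartUpload` (and the internal `completeMultipartUpload`) now takes `PutObjectOptions`, whose headers are attached to the final completion request. A hedged fragment assuming the imports from the first sketch; the endpoint and keys are placeholders, and the upload ID and parts are assumed to come from earlier `Core.NewMultipartUpload`/`Core.PutObjectPart` calls that are not shown here.

```go
core, err := minio.NewCore("minio.example.com", &minio.Options{
	Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
	Secure: true,
})
if err != nil {
	log.Fatalln(err)
}

// uploadID and parts come from earlier NewMultipartUpload/PutObjectPart calls (omitted).
var uploadID string
var parts []minio.CompletePart

etag, err := core.CompleteMultipartUpload(context.Background(), "my-bucket", "my-object",
	uploadID, parts, minio.PutObjectOptions{UserTags: map[string]string{"class": "archive"}})
if err != nil {
	log.Fatalln(err)
}
log.Println("completed multipart upload, etag:", etag)
```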
+ var wg sync.WaitGroup + wg.Add(n) + var buffers = make([][]byte, n) + var errs [n]error + for i := 0; i < n; i++ { + r := newRandomReader(int64((1<<20)*i+i), int64(i)) + buf, err := ioutil.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + buffers[i] = buf + + go func(i int) { + defer wg.Done() + _, errs[i] = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{PartSize: 5 << 20}) + }(i) + } + wg.Wait() + for _, err := range errs { + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + if len(results) != n { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + sort.Slice(results, func(i, j int) bool { + return results[i].Size < results[j].Size + }) + + sort.Slice(buffers, func(i, j int) bool { + return len(buffers[i]) < len(buffers[j]) + }) + + for i := 0; i < len(results); i++ { + opts := minio.GetObjectOptions{VersionID: results[i].VersionID} + reader, err := c.GetObject(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "error during GET object", err) + return + } + statInfo, err := reader.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err) + return + } + if statInfo.ETag != results[i].ETag { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) + return + } + if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) + return + } + if statInfo.Size != results[i].Size { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) + return + } + + tmpBuffer := bytes.NewBuffer([]byte{}) + _, err = io.Copy(tmpBuffer, reader) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected io.Copy()", err) + return + } + + if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) { + logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err) + return + } + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + func testCopyObjectWithVersioning() { // initialize logging params startTime := time.Now() @@ -1191,6 +1339,166 @@ func testCopyObjectWithVersioning() { successLogger(testName, function, args, startTime).Info() } +func testConcurrentCopyObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject()" + args := map[string]interface{}{} + + // Seed random based on current time. 
+ rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + var testFiles = []string{"datafile-10-kB"} + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := ioutil.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var infos []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + infos = append(infos, info) + } + + sort.Slice(infos, func(i, j int) bool { + return infos[i].Size < infos[j].Size + }) + + reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err) + return + } + + oldestContent, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) + return + } + + // Copy Source + srcOpts := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: infos[0].VersionID, + } + args["src"] = srcOpts + + dstOpts := minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName + "-copy", + } + args["dst"] = dstOpts + + // Perform the Copy concurrently + const n = 10 + var wg sync.WaitGroup + wg.Add(n) + var errs [n]error + for i := 0; i < n; i++ { + go func(i int) { + defer wg.Done() + _, errs[i] = c.CopyObject(context.Background(), dstOpts, srcOpts) + }(i) + } + wg.Wait() + for _, err := range errs { + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + } + + objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: false, Prefix: dstOpts.Object}) + infos = []minio.ObjectInfo{} + for 
info := range objectsInfo { + // Destination object + readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{VersionID: info.VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer readerCopy.Close() + + newestContent, err := ioutil.ReadAll(readerCopy) + if err != nil { + logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) + return + } + + if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) { + logError(testName, function, args, startTime, "", "Unexpected destination object content", err) + return + } + infos = append(infos, info) + } + + if len(infos) != n { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + func testComposeObjectWithVersioning() { // initialize logging params startTime := time.Now() @@ -7548,7 +7856,7 @@ func testSSECMultipartEncryptedToSSECCopyObjectPart() { completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, completeParts) + _, err = c.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, completeParts, minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) return @@ -7606,7 +7914,7 @@ func testSSECMultipartEncryptedToSSECCopyObjectPart() { } // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) return @@ -7783,7 +8091,7 @@ func testSSECEncryptedToSSECCopyObjectPart() { } // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) return @@ -7959,7 +8267,7 @@ func testSSECEncryptedToUnencryptedCopyPart() { } // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) return @@ -8138,7 +8446,7 @@ func 
testSSECEncryptedToSSES3CopyObjectPart() { } // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) return @@ -8312,7 +8620,7 @@ func testUnencryptedToSSECCopyObjectPart() { } // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) return @@ -8482,7 +8790,7 @@ func testUnencryptedToUnencryptedCopyPart() { } // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) return @@ -8654,7 +8962,7 @@ func testUnencryptedToSSES3CopyObjectPart() { } // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) return @@ -8829,7 +9137,7 @@ func testSSES3EncryptedToSSECCopyObjectPart() { } // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) return @@ -9000,7 +9308,7 @@ func testSSES3EncryptedToUnencryptedCopyPart() { } // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) return @@ -9174,7 +9482,7 @@ func testSSES3EncryptedToSSES3CopyObjectPart() { } // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + _, err = 
c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) return @@ -11285,22 +11593,20 @@ func testRemoveObjects() { var reader = getDataReader("datafile-129-MB") defer reader.Close() - n, err := c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) if err != nil { - log.Fatalln(err) + logError(testName, function, args, startTime, "", "Error uploading object", err) } - log.Println("Uploaded", objectName, " of size: ", n, "to bucket: ", bucketName, "Successfully.") // Replace with smaller... bufSize = dataFileMap["datafile-10-kB"] reader = getDataReader("datafile-10-kB") defer reader.Close() - n, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) if err != nil { - log.Fatalln(err) + logError(testName, function, args, startTime, "", "Error uploading object", err) } - log.Println("Uploaded", objectName, " of size: ", n, "to bucket: ", bucketName, "Successfully.") t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC) m := minio.RetentionMode(minio.Governance) @@ -11416,6 +11722,7 @@ func main() { testFPutObjectContextV2() testFGetObjectContextV2() testPutObjectContextV2() + testPutObjectWithVersioning() testMakeBucketError() testMakeBucketRegions() testPutObjectWithMetadata() @@ -11453,6 +11760,7 @@ func main() { testStatObjectWithVersioning() testGetObjectWithVersioning() testCopyObjectWithVersioning() + testConcurrentCopyObjectWithVersioning() testComposeObjectWithVersioning() testRemoveObjectWithVersioning() testRemoveObjectsWithVersioning() diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go index 62d1701e..6b93a27f 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go @@ -22,8 +22,13 @@ import ( "time" ) -// STSVersion sts version string -const STSVersion = "2011-06-15" +const ( + // STSVersion sts version string + STSVersion = "2011-06-15" + + // How much duration to slash from the given expiration duration + defaultExpiryWindow = 0.8 +) // A Value is the AWS credentials value for individual credential fields. type Value struct { @@ -82,10 +87,15 @@ type Expiry struct { // the expiration time given to ensure no requests are made with expired // tokens. func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { - e.expiration = expiration - if window > 0 { - e.expiration = e.expiration.Add(-window) + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + cut := window + if cut < 0 { + expireIn := expiration.Sub(e.CurrentTime()) + cut = time.Duration(float64(expireIn) * (1 - defaultExpiryWindow)) } + e.expiration = expiration.Add(-cut) } // IsExpired returns if the credentials are expired. 
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go index b532bcb6..bbd25ed8 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go @@ -38,7 +38,10 @@ import ( // prior to the credentials actually expiring. This is beneficial // so race conditions with expiring credentials do not cause // request to fail unexpectedly due to ExpiredTokenException exceptions. -const DefaultExpiryWindow = time.Second * 10 // 10 secs +// DefaultExpiryWindow can be used as parameter to (*Expiry).SetExpiration. +// When used the tokens refresh will be triggered when 80% of the elapsed +// time until the actual expiration time is passed. +const DefaultExpiryWindow = -1 // A IAM retrieves credentials from the EC2 service, and keeps track if // those credentials are expired. @@ -181,10 +184,6 @@ type ec2RoleCredRespBody struct { // be sent to fetch the rolling access credentials. // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html func getIAMRoleURL(endpoint string) (*url.URL, error) { - if endpoint == "" { - endpoint = defaultIAMRoleEndpoint - } - u, err := url.Parse(endpoint) if err != nil { return nil, err @@ -281,6 +280,10 @@ func fetchIMDSToken(client *http.Client, endpoint string) (string, error) { // If the credentials cannot be found, or there is an error // reading the response an error will be returned. func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { + if endpoint == "" { + endpoint = defaultIAMRoleEndpoint + } + // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html token, _ := fetchIMDSToken(client, endpoint) diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go index bcb3c36a..0fa5b55f 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go @@ -1,6 +1,6 @@ /* * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019 MinIO, Inc. + * Copyright 2019-2021 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ package credentials import ( "encoding/xml" "errors" + "fmt" "net/http" "net/url" "time" @@ -60,26 +61,86 @@ type LDAPIdentity struct { // LDAP username/password used to fetch LDAP STS credentials. LDAPUsername, LDAPPassword string + + // Session policy to apply to the generated credentials. Leave empty to + // use the full access policy available to the user. + Policy string + + // RequestedExpiry is the configured expiry duration for credentials + // requested from LDAP. + RequestedExpiry time.Duration } // NewLDAPIdentity returns new credentials object that uses LDAP // Identity. 
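The expiry handling above changes meaning: a negative window, which is what the redefined `DefaultExpiryWindow` now is, schedules a refresh once 80% of the credentials' lifetime has elapsed instead of a fixed 10 seconds before expiry. Before the LDAP changes continue, a hedged sketch of a custom provider that relies on this; `tokenProvider` and `fetchKeys` are hypothetical and stand in for an internal credentials service.

```go
package main

import (
	"time"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

// tokenProvider is a hypothetical provider fetching short-lived keys;
// embedding Expiry supplies IsExpired for the Provider interface.
type tokenProvider struct {
	credentials.Expiry
}

// fetchKeys stands in for a call to an internal credentials service.
func fetchKeys() (accessKey, secretKey, sessionToken string, expiresAt time.Time) {
	return "ACCESS-KEY", "SECRET-KEY", "", time.Now().Add(1 * time.Hour)
}

func (p *tokenProvider) Retrieve() (credentials.Value, error) {
	accessKey, secretKey, sessionToken, expiresAt := fetchKeys()

	// With a negative window (credentials.DefaultExpiryWindow is now -1) the
	// refresh is scheduled at 80% of the remaining lifetime.
	p.SetExpiration(expiresAt, credentials.DefaultExpiryWindow)

	return credentials.Value{
		AccessKeyID:     accessKey,
		SecretAccessKey: secretKey,
		SessionToken:    sessionToken,
	}, nil
}

func main() {
	creds := credentials.New(&tokenProvider{})
	_ = creds // pass via minio.Options{Creds: creds} when constructing a client
}
```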
-func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string) (*Credentials, error) { +func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string, optFuncs ...LDAPIdentityOpt) (*Credentials, error) { + l := LDAPIdentity{ + Client: &http.Client{Transport: http.DefaultTransport}, + STSEndpoint: stsEndpoint, + LDAPUsername: ldapUsername, + LDAPPassword: ldapPassword, + } + for _, optFunc := range optFuncs { + optFunc(&l) + } + return New(&l), nil +} + +// LDAPIdentityOpt is a function type used to configured the LDAPIdentity +// instance. +type LDAPIdentityOpt func(*LDAPIdentity) + +// LDAPIdentityPolicyOpt sets the session policy for requested credentials. +func LDAPIdentityPolicyOpt(policy string) LDAPIdentityOpt { + return func(k *LDAPIdentity) { + k.Policy = policy + } +} + +// LDAPIdentityExpiryOpt sets the expiry duration for requested credentials. +func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt { + return func(k *LDAPIdentity) { + k.RequestedExpiry = d + } +} + +func stripPassword(err error) error { + urlErr, ok := err.(*url.Error) + if ok { + u, _ := url.Parse(urlErr.URL) + if u == nil { + return urlErr + } + values := u.Query() + values.Set("LDAPPassword", "xxxxx") + u.RawQuery = values.Encode() + urlErr.URL = u.String() + return urlErr + } + return err +} + +// NewLDAPIdentityWithSessionPolicy returns new credentials object that uses +// LDAP Identity with a specified session policy. The `policy` parameter must be +// a JSON string specifying the policy document. +// +// DEPRECATED: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead. +func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, policy string) (*Credentials, error) { return New(&LDAPIdentity{ Client: &http.Client{Transport: http.DefaultTransport}, STSEndpoint: stsEndpoint, LDAPUsername: ldapUsername, LDAPPassword: ldapPassword, + Policy: policy, }), nil } // Retrieve gets the credential by calling the MinIO STS API for // LDAP on the configured stsEndpoint. 
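The sts_ldap_identity.go changes above turn `NewLDAPIdentity` into a variadic constructor with functional options for a session policy and a requested credential lifetime. A sketch of wiring it into a v7 client; the STS endpoint, LDAP credentials, bucket name and policy document are placeholders:

```go
package main

import (
	"log"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Restrict the issued credentials to read-only access on one bucket and
	// request a 45-minute lifetime; both options are new in this change.
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["s3:GetObject"],"Resource":["arn:aws:s3:::mybucket/*"]}]}`

	creds, err := credentials.NewLDAPIdentity(
		"https://minio.example.com:9000", // STS endpoint (placeholder)
		"ldap-user", "ldap-secret",
		credentials.LDAPIdentityPolicyOpt(policy),
		credentials.LDAPIdentityExpiryOpt(45*time.Minute),
	)
	if err != nil {
		log.Fatal(err)
	}

	client, err := minio.New("minio.example.com:9000", &minio.Options{
		Creds:  creds,
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = client // use the client as usual; the STS credentials refresh on demand
}
```

The old `NewLDAPIdentityWithSessionPolicy` constructor is kept but marked deprecated in favour of `LDAPIdentityPolicyOpt`, and failed STS requests now go through `stripPassword` so the LDAP password is redacted from returned URL errors.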
func (k *LDAPIdentity) Retrieve() (value Value, err error) { - u, kerr := url.Parse(k.STSEndpoint) - if kerr != nil { - err = kerr - return + u, err := url.Parse(k.STSEndpoint) + if err != nil { + return value, err } v := url.Values{} @@ -87,25 +148,28 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) { v.Set("Version", STSVersion) v.Set("LDAPUsername", k.LDAPUsername) v.Set("LDAPPassword", k.LDAPPassword) + if k.Policy != "" { + v.Set("Policy", k.Policy) + } + if k.RequestedExpiry != 0 { + v.Set("DurationSeconds", fmt.Sprintf("%d", int(k.RequestedExpiry.Seconds()))) + } u.RawQuery = v.Encode() - req, kerr := http.NewRequest(http.MethodPost, u.String(), nil) - if kerr != nil { - err = kerr - return + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return value, stripPassword(err) } - resp, kerr := k.Client.Do(req) - if kerr != nil { - err = kerr - return + resp, err := k.Client.Do(req) + if err != nil { + return value, stripPassword(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - err = errors.New(resp.Status) - return + return value, errors.New(resp.Status) } r := AssumeRoleWithLDAPResponse{} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go index 161ffd36..c1109140 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go @@ -54,8 +54,9 @@ type WebIdentityResult struct { // WebIdentityToken - web identity token with expiry. type WebIdentityToken struct { - Token string - Expiry int + Token string + AccessToken string + Expiry int } // A STSWebIdentity retrieves credentials from MinIO service, and keeps track if @@ -121,6 +122,10 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession v.Set("RoleSessionName", roleSessionName) } v.Set("WebIdentityToken", idToken.Token) + if idToken.AccessToken != "" { + // Usually set when server is using extended userInfo endpoint. + v.Set("WebIdentityAccessToken", idToken.AccessToken) + } if idToken.Expiry > 0 { v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry)) } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go index b6f9601b..83870a36 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go @@ -19,6 +19,7 @@ package lifecycle import ( + "encoding/json" "encoding/xml" "time" ) @@ -116,6 +117,26 @@ type Transition struct { Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"` } +// MarshalJSON customizes json encoding by omitting empty values +func (t Transition) MarshalJSON() ([]byte, error) { + type transition struct { + Date *ExpirationDate `json:"Date,omitempty"` + StorageClass string `json:"StorageClass,omitempty"` + Days *ExpirationDays `json:"Days,omitempty"` + } + + newt := transition{ + StorageClass: t.StorageClass, + } + if !t.IsDaysNull() { + newt.Days = &t.Days + } + if !t.IsDateNull() { + newt.Date = &t.Date + } + return json.Marshal(newt) +} + // IsDaysNull returns true if days field is null func (t Transition) IsDaysNull() bool { return t.Days == ExpirationDays(0) @@ -160,6 +181,31 @@ type Filter struct { Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` } +// IsNull returns true if all Filter fields are empty. 
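The lifecycle.go diff that starts above, and continues below with the `Filter`, `Expiration` and `Rule` variants, adds custom `MarshalJSON` methods so empty day/date and filter sections disappear from the JSON form of a rule. A small sketch of the effect on a single rule; the rule contents are made up:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/minio/minio-go/v7/pkg/lifecycle"
)

func main() {
	rule := lifecycle.Rule{
		ID:         "expire-logs",
		Status:     "Enabled",
		RuleFilter: lifecycle.Filter{Prefix: "logs/"},
		Expiration: lifecycle.Expiration{Days: 30},
	}

	out, err := json.Marshal(rule)
	if err != nil {
		panic(err)
	}
	// Only the populated parts (ID, Status, Filter, Expiration) are emitted;
	// the empty Transition, NoncurrentVersion* and
	// AbortIncompleteMultipartUpload sections are omitted by the new
	// MarshalJSON methods instead of showing up as zero values.
	fmt.Println(string(out))
}
```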
+func (f Filter) IsNull() bool { + return f.Tag.IsEmpty() && f.And.IsEmpty() && f.Prefix == "" +} + +// MarshalJSON customizes json encoding by removing empty values. +func (f Filter) MarshalJSON() ([]byte, error) { + type filter struct { + And *And `json:"And,omitempty"` + Prefix string `json:"Prefix,omitempty"` + Tag *Tag `json:"Tag,omitempty"` + } + + newf := filter{ + Prefix: f.Prefix, + } + if !f.Tag.IsEmpty() { + newf.Tag = &f.Tag + } + if !f.And.IsEmpty() { + newf.And = &f.And + } + return json.Marshal(newf) +} + // MarshalXML - produces the xml representation of the Filter struct // only one of Prefix, And and Tag should be present in the output. func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error { @@ -238,6 +284,26 @@ type Expiration struct { DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty"` } +// MarshalJSON customizes json encoding by removing empty day/date specification. +func (e Expiration) MarshalJSON() ([]byte, error) { + type expiration struct { + Date *ExpirationDate `json:"Date,omitempty"` + Days *ExpirationDays `json:"Days,omitempty"` + DeleteMarker ExpireDeleteMarker + } + + newexp := expiration{ + DeleteMarker: e.DeleteMarker, + } + if !e.IsDaysNull() { + newexp.Days = &e.Days + } + if !e.IsDateNull() { + newexp.Date = &e.Date + } + return json.Marshal(newexp) +} + // IsDaysNull returns true if days field is null func (e Expiration) IsDaysNull() bool { return e.Days == ExpirationDays(0) @@ -267,6 +333,47 @@ func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) e return en.EncodeElement(expirationWrapper(e), startElement) } +// MarshalJSON customizes json encoding by omitting empty values +func (r Rule) MarshalJSON() ([]byte, error) { + type rule struct { + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"` + Expiration *Expiration `json:"Expiration,omitempty"` + ID string `json:"ID"` + RuleFilter *Filter `json:"Filter,omitempty"` + NoncurrentVersionExpiration *NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration,omitempty"` + NoncurrentVersionTransition *NoncurrentVersionTransition `json:"NoncurrentVersionTransition,omitempty"` + Prefix string `json:"Prefix,omitempty"` + Status string `json:"Status"` + Transition *Transition `json:"Transition,omitempty"` + } + newr := rule{ + Prefix: r.Prefix, + Status: r.Status, + ID: r.ID, + } + + if !r.RuleFilter.IsNull() { + newr.RuleFilter = &r.RuleFilter + } + if !r.AbortIncompleteMultipartUpload.IsDaysNull() { + newr.AbortIncompleteMultipartUpload = &r.AbortIncompleteMultipartUpload + } + if !r.Expiration.IsNull() { + newr.Expiration = &r.Expiration + } + if !r.Transition.IsNull() { + newr.Transition = &r.Transition + } + if !r.NoncurrentVersionExpiration.IsDaysNull() { + newr.NoncurrentVersionExpiration = &r.NoncurrentVersionExpiration + } + if !r.NoncurrentVersionTransition.IsDaysNull() { + newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition + } + + return json.Marshal(newr) +} + // Rule represents a single rule in lifecycle configuration type Rule struct { XMLName xml.Name `xml:"Rule,omitempty" json:"-"` diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go index beacc71f..0211f1fb 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go @@ -47,13 +47,13 @@ const ( // Options 
represents options to set a replication configuration rule type Options struct { Op OptionType + RoleArn string ID string Prefix string RuleStatus string Priority string TagString string StorageClass string - RoleArn string DestBucket string IsTagSet bool IsSCSet bool @@ -103,9 +103,17 @@ func (c *Config) AddRule(opts Options) error { if err != nil { return err } - if opts.RoleArn != c.Role && c.Role != "" { - return fmt.Errorf("role ARN does not match existing configuration") + if opts.RoleArn != "" { + tokens := strings.Split(opts.RoleArn, ":") + if len(tokens) != 6 { + return fmt.Errorf("invalid format for replication Role Arn: %v", opts.RoleArn) + } + if !strings.HasPrefix(opts.RoleArn, "arn:aws:iam") { + return fmt.Errorf("RoleArn invalid for AWS replication configuration: %v", opts.RoleArn) + } + c.Role = opts.RoleArn } + var status Status // toggle rule status for edit option switch opts.RuleStatus { @@ -139,28 +147,11 @@ func (c *Config) AddRule(opts Options) error { if opts.ID == "" { opts.ID = xid.New().String() } - arnStr := opts.RoleArn - if opts.RoleArn == "" { - arnStr = c.Role - } - if arnStr == "" { - return fmt.Errorf("role ARN required") - } - tokens := strings.Split(arnStr, ":") - if len(tokens) != 6 { - return fmt.Errorf("invalid format for replication Arn") - } - if c.Role == "" { - c.Role = arnStr - } + destBucket := opts.DestBucket // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html if btokens := strings.Split(destBucket, ":"); len(btokens) != 6 { - if len(btokens) == 1 { - destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket) - } else { - return fmt.Errorf("destination bucket needs to be in Arn format") - } + return fmt.Errorf("destination bucket needs to be in Arn format") } dmStatus := Disabled if opts.ReplicateDeleteMarkers != "" { @@ -236,13 +227,18 @@ func (c *Config) AddRule(opts Options) error { if err := newRule.Validate(); err != nil { return err } + // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for MinIO configuration + if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") { + for i := range c.Rules { + c.Rules[i].Destination.Bucket = c.Role + } + c.Role = "" + } + for _, rule := range c.Rules { if rule.Priority == newRule.Priority { return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority") } - if rule.Destination.Bucket != newRule.Destination.Bucket { - return fmt.Errorf("the destination bucket must be same for all rules") - } if rule.ID == newRule.ID { return fmt.Errorf("a rule exists with this ID") } @@ -257,6 +253,14 @@ func (c *Config) EditRule(opts Options) error { if opts.ID == "" { return fmt.Errorf("rule ID missing") } + // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for non AWS. 
+ if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") { + for i := range c.Rules { + c.Rules[i].Destination.Bucket = c.Role + } + c.Role = "" + } + rIdx := -1 var newRule Rule for i, rule := range c.Rules { @@ -351,7 +355,7 @@ func (c *Config) EditRule(opts Options) error { return fmt.Errorf("replica metadata sync should be either [enable|disable]") } } - fmt.Println("opts.ExistingObjectReplicate>", opts.ExistingObjectReplicate) + if opts.ExistingObjectReplicate != "" { switch opts.ExistingObjectReplicate { case "enable": @@ -376,11 +380,7 @@ func (c *Config) EditRule(opts Options) error { destBucket := opts.DestBucket // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html if btokens := strings.Split(opts.DestBucket, ":"); len(btokens) != 6 { - if len(btokens) == 1 { - destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket) - } else { - return fmt.Errorf("destination bucket needs to be in Arn format") - } + return fmt.Errorf("destination bucket needs to be in Arn format") } newRule.Destination.Bucket = destBucket } @@ -393,8 +393,8 @@ func (c *Config) EditRule(opts Options) error { if rule.Priority == newRule.Priority && rIdx != idx { return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority") } - if rule.Destination.Bucket != newRule.Destination.Bucket { - return fmt.Errorf("the destination bucket must be same for all rules") + if rule.Destination.Bucket != newRule.Destination.Bucket && rule.ID == newRule.ID { + return fmt.Errorf("invalid destination bucket for this rule") } } @@ -678,9 +678,9 @@ func (e ExistingObjectReplication) Validate() error { return nil } -// Metrics represents inline replication metrics -// such as pending, failed and completed bytes in total for a bucket -type Metrics struct { +// TargetMetrics represents inline replication metrics +// such as pending, failed and completed bytes in total for a bucket remote target +type TargetMetrics struct { // Pending size in bytes PendingSize uint64 `json:"pendingReplicationSize"` // Completed size in bytes @@ -694,3 +694,28 @@ type Metrics struct { // Total number of failed operations including metadata updates FailedCount uint64 `json:"failedReplicationCount"` } + +// Metrics represents inline replication metrics for a bucket. 
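The replication.go changes above relax two earlier constraints: rules no longer have to share a single destination bucket, and `RoleArn` is only validated (and must be an `arn:aws:iam...` ARN) when it is explicitly set for an AWS-style configuration; MinIO deployments can omit it, and a legacy non-AWS `Role` is migrated into each rule's destination ARN. Roughly how a rule might be added under the new behaviour; the bucket ARN, prefix and priority are placeholders and the remaining `Options` fields keep their defaults:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/replication"
)

func main() {
	var cfg replication.Config

	// For a MinIO target no RoleArn is needed any more; the destination must
	// still be given in ARN form ("arn:aws:s3:::<bucket>"), since the bare
	// bucket-name shortcut was removed in this change.
	err := cfg.AddRule(replication.Options{
		ID:         "mirror-logs",
		Prefix:     "logs/",
		Priority:   "1",
		RuleStatus: "enable",
		DestBucket: "arn:aws:s3:::backup-bucket",
	})
	if err != nil {
		fmt.Println("AddRule failed:", err)
		return
	}
	fmt.Printf("config now has %d rule(s), Role=%q\n", len(cfg.Rules), cfg.Role)
}
```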
+type Metrics struct { + Stats map[string]TargetMetrics + // Total Pending size in bytes across targets + PendingSize uint64 `json:"pendingReplicationSize"` + // Completed size in bytes across targets + ReplicatedSize uint64 `json:"completedReplicationSize"` + // Total Replica size in bytes across targets + ReplicaSize uint64 `json:"replicaSize"` + // Failed size in bytes across targets + FailedSize uint64 `json:"failedReplicationSize"` + // Total number of pending operations including metadata updates across targets + PendingCount uint64 `json:"pendingReplicationCount"` + // Total number of failed operations including metadata updates across targets + FailedCount uint64 `json:"failedReplicationCount"` +} + +type ResyncTargetsInfo struct { + Targets []ResyncTarget `json:"target,omitempty"` +} +type ResyncTarget struct { + Arn string `json:"arn"` + ResetID string `json:"resetid"` +} diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go index 4bdf1a3c..e7f90a3b 100644 --- a/vendor/github.com/minio/minio-go/v7/utils.go +++ b/vendor/github.com/minio/minio-go/v7/utils.go @@ -18,14 +18,17 @@ package minio import ( + "context" "crypto/md5" "encoding/base64" "encoding/hex" "encoding/xml" + "errors" "fmt" "hash" "io" "io/ioutil" + "math/rand" "net" "net/http" "net/url" @@ -58,6 +61,26 @@ func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) { return time.Time{}, "" } +var restoreRegex = regexp.MustCompile(`ongoing-request="(.*?)"(, expiry-date="(.*?)")?`) + +func amzRestoreToStruct(restore string) (ongoing bool, expTime time.Time, err error) { + matches := restoreRegex.FindStringSubmatch(restore) + if len(matches) != 4 { + return false, time.Time{}, errors.New("unexpected restore header") + } + ongoing, err = strconv.ParseBool(matches[1]) + if err != nil { + return false, time.Time{}, err + } + if matches[3] != "" { + expTime, err = time.Parse(http.TimeFormat, matches[3]) + if err != nil { + return false, time.Time{}, err + } + } + return +} + // xmlDecoder provide decoded value in xml. 
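The utils.go hunk above parses the `x-amz-restore` response header and surfaces it as `ObjectInfo.Restore`. A sketch of inspecting it after a `StatObject` call; the endpoint, keys, bucket and object names are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	info, err := client.StatObject(context.Background(), "mybucket", "archived.bin", minio.StatObjectOptions{})
	if err != nil {
		log.Fatal(err)
	}

	// Restore stays nil unless the response carried an x-amz-restore header.
	if info.Restore != nil {
		if info.Restore.OngoingRestore {
			fmt.Println("restore still in progress")
		} else {
			fmt.Println("restored copy available until", info.Restore.ExpiryTime)
		}
	}
}
```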
func xmlDecoder(body io.Reader, v interface{}) error { d := xml.NewDecoder(body) @@ -294,6 +317,16 @@ func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectIn } } + // Nil if not found + var restore *RestoreInfo + if restoreHdr := h.Get(amzRestore); restoreHdr != "" { + ongoing, expTime, err := amzRestoreToStruct(restoreHdr) + if err != nil { + return ObjectInfo{}, err + } + restore = &RestoreInfo{OngoingRestore: ongoing, ExpiryTime: expTime} + } + // extract lifecycle expiry date and rule ID expTime, ruleID := amzExpirationToExpiryDateRuleID(h.Get(amzExpiration)) @@ -319,6 +352,7 @@ func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectIn UserMetadata: userMetadata, UserTags: userTags, UserTagCount: tagCount, + Restore: restore, }, nil } @@ -397,19 +431,20 @@ func getDefaultLocation(u url.URL, regionOverride string) (location string) { return region } -var supportedHeaders = []string{ - "content-type", - "cache-control", - "content-encoding", - "content-disposition", - "content-language", - "x-amz-website-redirect-location", - "x-amz-object-lock-mode", - "x-amz-metadata-directive", - "x-amz-object-lock-retain-until-date", - "expires", - "x-amz-replication-status", +var supportedHeaders = map[string]bool{ + "content-type": true, + "cache-control": true, + "content-encoding": true, + "content-disposition": true, + "content-language": true, + "x-amz-website-redirect-location": true, + "x-amz-object-lock-mode": true, + "x-amz-metadata-directive": true, + "x-amz-object-lock-retain-until-date": true, + "expires": true, + "x-amz-replication-status": true, // Add more supported headers here. + // Must be lower case. } // isStorageClassHeader returns true if the header is a supported storage class header @@ -419,34 +454,24 @@ func isStorageClassHeader(headerKey string) bool { // isStandardHeader returns true if header is a supported header and not a custom header func isStandardHeader(headerKey string) bool { - key := strings.ToLower(headerKey) - for _, header := range supportedHeaders { - if strings.ToLower(header) == key { - return true - } - } - return false + return supportedHeaders[strings.ToLower(headerKey)] } // sseHeaders is list of server side encryption headers -var sseHeaders = []string{ - "x-amz-server-side-encryption", - "x-amz-server-side-encryption-aws-kms-key-id", - "x-amz-server-side-encryption-context", - "x-amz-server-side-encryption-customer-algorithm", - "x-amz-server-side-encryption-customer-key", - "x-amz-server-side-encryption-customer-key-MD5", +var sseHeaders = map[string]bool{ + "x-amz-server-side-encryption": true, + "x-amz-server-side-encryption-aws-kms-key-id": true, + "x-amz-server-side-encryption-context": true, + "x-amz-server-side-encryption-customer-algorithm": true, + "x-amz-server-side-encryption-customer-key": true, + "x-amz-server-side-encryption-customer-key-md5": true, + // Add more supported headers here. + // Must be lower case. } // isSSEHeader returns true if header is a server side encryption header. func isSSEHeader(headerKey string) bool { - key := strings.ToLower(headerKey) - for _, h := range sseHeaders { - if strings.ToLower(h) == key { - return true - } - } - return false + return sseHeaders[strings.ToLower(headerKey)] } // isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header. 
@@ -486,3 +511,79 @@ func (m hashWrapper) Close() { } m.Hash = nil } + +const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" +const ( + letterIdxBits = 6 // 6 bits to represent a letter index + letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits + letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits +) + +// randString generates random names and prepends them with a known prefix. +func randString(n int, src rand.Source, prefix string) string { + b := make([]byte, n) + // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters! + for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; { + if remain == 0 { + cache, remain = src.Int63(), letterIdxMax + } + if idx := int(cache & letterIdxMask); idx < len(letterBytes) { + b[i] = letterBytes[idx] + i-- + } + cache >>= letterIdxBits + remain-- + } + return prefix + string(b[0:30-len(prefix)]) +} + +// IsNetworkOrHostDown - if there was a network error or if the host is down. +// expectTimeouts indicates that *context* timeouts are expected and does not +// indicate a downed host. Other timeouts still returns down. +func IsNetworkOrHostDown(err error, expectTimeouts bool) bool { + if err == nil { + return false + } + + if errors.Is(err, context.Canceled) { + return false + } + + if expectTimeouts && errors.Is(err, context.DeadlineExceeded) { + return false + } + // We need to figure if the error either a timeout + // or a non-temporary error. + urlErr := &url.Error{} + if errors.As(err, &urlErr) { + switch urlErr.Err.(type) { + case *net.DNSError, *net.OpError, net.UnknownNetworkError: + return true + } + } + var e net.Error + if errors.As(err, &e) { + if e.Timeout() { + return true + } + } + + // Fallback to other mechanisms. + switch { + case strings.Contains(err.Error(), "Connection closed by foreign host"): + return true + case strings.Contains(err.Error(), "TLS handshake timeout"): + // If error is - tlsHandshakeTimeoutError. + return true + case strings.Contains(err.Error(), "i/o timeout"): + // If error is - tcp timeoutError. + return true + case strings.Contains(err.Error(), "connection timed out"): + // If err is a net.Dial timeout. + return true + case strings.Contains(strings.ToLower(err.Error()), "503 service unavailable"): + // Denial errors + return true + } + return false +} diff --git a/vendor/github.com/modern-go/reflect2/.travis.yml b/vendor/github.com/modern-go/reflect2/.travis.yml index fbb43744..b097728d 100644 --- a/vendor/github.com/modern-go/reflect2/.travis.yml +++ b/vendor/github.com/modern-go/reflect2/.travis.yml @@ -1,7 +1,7 @@ language: go go: - - 1.8.x + - 1.9.x - 1.x before_install: diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.lock b/vendor/github.com/modern-go/reflect2/Gopkg.lock index 2a3a6989..10ef8111 100644 --- a/vendor/github.com/modern-go/reflect2/Gopkg.lock +++ b/vendor/github.com/modern-go/reflect2/Gopkg.lock @@ -1,15 +1,9 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
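The tail of the same utils.go diff above also adds `IsNetworkOrHostDown`, a single place for classifying connectivity failures (context cancellations are not treated as outages, and context deadline errors only count when `expectTimeouts` is false). A rough retry loop built on it; the operation, endpoint, credentials and backoff policy are illustrative, not part of the library:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

// withRetry retries op while the error looks like a network or host problem.
// This is a sketch; real code would cap attempts and add jittered backoff.
func withRetry(op func() error) error {
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		if err = op(); err == nil {
			return nil
		}
		if !minio.IsNetworkOrHostDown(err, false) {
			return err // not a transient network condition, give up
		}
		time.Sleep(time.Duration(attempt+1) * time.Second)
	}
	return err
}

func main() {
	client, err := minio.New("minio.example.com:9000", &minio.Options{
		Creds: credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
	})
	if err != nil {
		log.Fatal(err)
	}
	err = withRetry(func() error {
		_, statErr := client.StatObject(context.Background(), "mybucket", "obj", minio.StatObjectOptions{})
		return statErr
	})
	log.Println("final result:", err)
}
```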
-[[projects]] - name = "github.com/modern-go/concurrent" - packages = ["."] - revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" - version = "1.0.0" - [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "daee8a88b3498b61c5640056665b8b9eea062006f5e596bbb6a3ed9119a11ec7" + input-imports = [] solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.toml b/vendor/github.com/modern-go/reflect2/Gopkg.toml index 2f4f4dbd..a9bc5061 100644 --- a/vendor/github.com/modern-go/reflect2/Gopkg.toml +++ b/vendor/github.com/modern-go/reflect2/Gopkg.toml @@ -26,10 +26,6 @@ ignored = [] -[[constraint]] - name = "github.com/modern-go/concurrent" - version = "1.0.0" - [prune] go-tests = true unused-packages = true diff --git a/vendor/github.com/modern-go/reflect2/go_above_118.go b/vendor/github.com/modern-go/reflect2/go_above_118.go new file mode 100644 index 00000000..2b4116f6 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go_above_118.go @@ -0,0 +1,23 @@ +//+build go1.18 + +package reflect2 + +import ( + "unsafe" +) + +// m escapes into the return value, but the caller of mapiterinit +// doesn't let the return value escape. +//go:noescape +//go:linkname mapiterinit reflect.mapiterinit +func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer, it *hiter) + +func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { + var it hiter + mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj), &it) + return &UnsafeMapIterator{ + hiter: &it, + pKeyRType: type2.pKeyRType, + pElemRType: type2.pElemRType, + } +}
\ No newline at end of file diff --git a/vendor/github.com/modern-go/reflect2/go_above_17.go b/vendor/github.com/modern-go/reflect2/go_above_17.go deleted file mode 100644 index 5c1cea86..00000000 --- a/vendor/github.com/modern-go/reflect2/go_above_17.go +++ /dev/null @@ -1,8 +0,0 @@ -//+build go1.7 - -package reflect2 - -import "unsafe" - -//go:linkname resolveTypeOff reflect.resolveTypeOff -func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer diff --git a/vendor/github.com/modern-go/reflect2/go_above_19.go b/vendor/github.com/modern-go/reflect2/go_above_19.go index c7e3b780..974f7685 100644 --- a/vendor/github.com/modern-go/reflect2/go_above_19.go +++ b/vendor/github.com/modern-go/reflect2/go_above_19.go @@ -6,6 +6,9 @@ import ( "unsafe" ) +//go:linkname resolveTypeOff reflect.resolveTypeOff +func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer + //go:linkname makemap reflect.makemap func makemap(rtype unsafe.Pointer, cap int) (m unsafe.Pointer) diff --git a/vendor/github.com/modern-go/reflect2/go_below_118.go b/vendor/github.com/modern-go/reflect2/go_below_118.go new file mode 100644 index 00000000..00003dbd --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go_below_118.go @@ -0,0 +1,21 @@ +//+build !go1.18 + +package reflect2 + +import ( + "unsafe" +) + +// m escapes into the return value, but the caller of mapiterinit +// doesn't let the return value escape. +//go:noescape +//go:linkname mapiterinit reflect.mapiterinit +func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) (val *hiter) + +func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { + return &UnsafeMapIterator{ + hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)), + pKeyRType: type2.pKeyRType, + pElemRType: type2.pElemRType, + } +}
\ No newline at end of file diff --git a/vendor/github.com/modern-go/reflect2/go_below_17.go b/vendor/github.com/modern-go/reflect2/go_below_17.go deleted file mode 100644 index 65a93c88..00000000 --- a/vendor/github.com/modern-go/reflect2/go_below_17.go +++ /dev/null @@ -1,9 +0,0 @@ -//+build !go1.7 - -package reflect2 - -import "unsafe" - -func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { - return nil -} diff --git a/vendor/github.com/modern-go/reflect2/go_below_19.go b/vendor/github.com/modern-go/reflect2/go_below_19.go deleted file mode 100644 index b050ef70..00000000 --- a/vendor/github.com/modern-go/reflect2/go_below_19.go +++ /dev/null @@ -1,14 +0,0 @@ -//+build !go1.9 - -package reflect2 - -import ( - "unsafe" -) - -//go:linkname makemap reflect.makemap -func makemap(rtype unsafe.Pointer) (m unsafe.Pointer) - -func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer { - return makemap(rtype) -} diff --git a/vendor/github.com/modern-go/reflect2/reflect2.go b/vendor/github.com/modern-go/reflect2/reflect2.go index 63b49c79..c43c8b9d 100644 --- a/vendor/github.com/modern-go/reflect2/reflect2.go +++ b/vendor/github.com/modern-go/reflect2/reflect2.go @@ -1,8 +1,9 @@ package reflect2 import ( - "github.com/modern-go/concurrent" "reflect" + "runtime" + "sync" "unsafe" ) @@ -130,13 +131,13 @@ var ConfigSafe = Config{UseSafeImplementation: true}.Froze() type frozenConfig struct { useSafeImplementation bool - cache *concurrent.Map + cache *sync.Map } func (cfg Config) Froze() *frozenConfig { return &frozenConfig{ useSafeImplementation: cfg.UseSafeImplementation, - cache: concurrent.NewMap(), + cache: new(sync.Map), } } @@ -288,11 +289,12 @@ func NoEscape(p unsafe.Pointer) unsafe.Pointer { } func UnsafeCastString(str string) []byte { + bytes := make([]byte, 0) stringHeader := (*reflect.StringHeader)(unsafe.Pointer(&str)) - sliceHeader := &reflect.SliceHeader{ - Data: stringHeader.Data, - Cap: stringHeader.Len, - Len: stringHeader.Len, - } - return *(*[]byte)(unsafe.Pointer(sliceHeader)) + sliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&bytes)) + sliceHeader.Data = stringHeader.Data + sliceHeader.Cap = stringHeader.Len + sliceHeader.Len = stringHeader.Len + runtime.KeepAlive(str) + return bytes } diff --git a/vendor/github.com/modern-go/reflect2/test.sh b/vendor/github.com/modern-go/reflect2/test.sh deleted file mode 100644 index 3d2b9768..00000000 --- a/vendor/github.com/modern-go/reflect2/test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -e -echo "" > coverage.txt - -for d in $(go list github.com/modern-go/reflect2-tests/... 
| grep -v vendor); do - go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d - if [ -f profile.out ]; then - cat profile.out >> coverage.txt - rm profile.out - fi -done diff --git a/vendor/github.com/modern-go/reflect2/type_map.go b/vendor/github.com/modern-go/reflect2/type_map.go index 3acfb558..4b13c315 100644 --- a/vendor/github.com/modern-go/reflect2/type_map.go +++ b/vendor/github.com/modern-go/reflect2/type_map.go @@ -1,17 +1,13 @@ +// +build !gccgo + package reflect2 import ( "reflect" - "runtime" - "strings" "sync" "unsafe" ) -// typelinks1 for 1.5 ~ 1.6 -//go:linkname typelinks1 reflect.typelinks -func typelinks1() [][]unsafe.Pointer - // typelinks2 for 1.7 ~ //go:linkname typelinks2 reflect.typelinks func typelinks2() (sections []unsafe.Pointer, offset [][]int32) @@ -27,49 +23,10 @@ func discoverTypes() { types = make(map[string]reflect.Type) packages = make(map[string]map[string]reflect.Type) - ver := runtime.Version() - if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") { - loadGo15Types() - } else if ver == "go1.6" || strings.HasPrefix(ver, "go1.6.") { - loadGo15Types() - } else { - loadGo17Types() - } -} - -func loadGo15Types() { - var obj interface{} = reflect.TypeOf(0) - typePtrss := typelinks1() - for _, typePtrs := range typePtrss { - for _, typePtr := range typePtrs { - (*emptyInterface)(unsafe.Pointer(&obj)).word = typePtr - typ := obj.(reflect.Type) - if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct { - loadedType := typ.Elem() - pkgTypes := packages[loadedType.PkgPath()] - if pkgTypes == nil { - pkgTypes = map[string]reflect.Type{} - packages[loadedType.PkgPath()] = pkgTypes - } - types[loadedType.String()] = loadedType - pkgTypes[loadedType.Name()] = loadedType - } - if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Ptr && - typ.Elem().Elem().Kind() == reflect.Struct { - loadedType := typ.Elem().Elem() - pkgTypes := packages[loadedType.PkgPath()] - if pkgTypes == nil { - pkgTypes = map[string]reflect.Type{} - packages[loadedType.PkgPath()] = pkgTypes - } - types[loadedType.String()] = loadedType - pkgTypes[loadedType.Name()] = loadedType - } - } - } + loadGoTypes() } -func loadGo17Types() { +func loadGoTypes() { var obj interface{} = reflect.TypeOf(0) sections, offset := typelinks2() for i, offs := range offset { diff --git a/vendor/github.com/modern-go/reflect2/unsafe_link.go b/vendor/github.com/modern-go/reflect2/unsafe_link.go index 57229c8d..b49f614e 100644 --- a/vendor/github.com/modern-go/reflect2/unsafe_link.go +++ b/vendor/github.com/modern-go/reflect2/unsafe_link.go @@ -19,18 +19,12 @@ func typedslicecopy(elemType unsafe.Pointer, dst, src sliceHeader) int //go:linkname mapassign reflect.mapassign //go:noescape -func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key, val unsafe.Pointer) +func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer, val unsafe.Pointer) //go:linkname mapaccess reflect.mapaccess //go:noescape func mapaccess(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer) -// m escapes into the return value, but the caller of mapiterinit -// doesn't let the return value escape. 
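Among the reflect2 changes above, `UnsafeCastString` (in reflect2.go) is rewritten to patch a real slice header and keep the source string alive, presumably to satisfy vet/checkptr rules about constructing `reflect.SliceHeader` values by hand. Its contract is unchanged; a tiny usage sketch:

```go
package main

import (
	"fmt"

	"github.com/modern-go/reflect2"
)

func main() {
	s := "hello, world"

	// b aliases the string's backing array: no copy is made, so the bytes
	// must be treated as read-only.
	b := reflect2.UnsafeCastString(s)

	fmt.Println(len(b), string(b[:5])) // 12 hello
}
```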
-//go:noescape -//go:linkname mapiterinit reflect.mapiterinit -func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) *hiter - //go:noescape //go:linkname mapiternext reflect.mapiternext func mapiternext(it *hiter) @@ -42,9 +36,21 @@ func ifaceE2I(rtype unsafe.Pointer, src interface{}, dst unsafe.Pointer) // If you modify hiter, also change cmd/internal/gc/reflect.go to indicate // the layout of this structure. type hiter struct { - key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go). - value unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go). - // rest fields are ignored + key unsafe.Pointer + value unsafe.Pointer + t unsafe.Pointer + h unsafe.Pointer + buckets unsafe.Pointer + bptr unsafe.Pointer + overflow *[]unsafe.Pointer + oldoverflow *[]unsafe.Pointer + startBucket uintptr + offset uint8 + wrapped bool + B uint8 + i uint8 + bucket uintptr + checkBucket uintptr } // add returns p+x. diff --git a/vendor/github.com/modern-go/reflect2/unsafe_map.go b/vendor/github.com/modern-go/reflect2/unsafe_map.go index f2e76e6b..37872da8 100644 --- a/vendor/github.com/modern-go/reflect2/unsafe_map.go +++ b/vendor/github.com/modern-go/reflect2/unsafe_map.go @@ -107,14 +107,6 @@ func (type2 *UnsafeMapType) Iterate(obj interface{}) MapIterator { return type2.UnsafeIterate(objEFace.data) } -func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { - return &UnsafeMapIterator{ - hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)), - pKeyRType: type2.pKeyRType, - pElemRType: type2.pElemRType, - } -} - type UnsafeMapIterator struct { *hiter pKeyRType unsafe.Pointer diff --git a/vendor/github.com/slack-go/slack/block.go b/vendor/github.com/slack-go/slack/block.go index 3686db85..240f5527 100644 --- a/vendor/github.com/slack-go/slack/block.go +++ b/vendor/github.com/slack-go/slack/block.go @@ -9,14 +9,15 @@ package slack type MessageBlockType string const ( - MBTSection MessageBlockType = "section" - MBTDivider MessageBlockType = "divider" - MBTImage MessageBlockType = "image" - MBTAction MessageBlockType = "actions" - MBTContext MessageBlockType = "context" - MBTFile MessageBlockType = "file" - MBTInput MessageBlockType = "input" - MBTHeader MessageBlockType = "header" + MBTSection MessageBlockType = "section" + MBTDivider MessageBlockType = "divider" + MBTImage MessageBlockType = "image" + MBTAction MessageBlockType = "actions" + MBTContext MessageBlockType = "context" + MBTFile MessageBlockType = "file" + MBTInput MessageBlockType = "input" + MBTHeader MessageBlockType = "header" + MBTRichText MessageBlockType = "rich_text" ) // Block defines an interface all block types should implement diff --git a/vendor/github.com/slack-go/slack/block_conv.go b/vendor/github.com/slack-go/slack/block_conv.go index 6936700a..c5378b60 100644 --- a/vendor/github.com/slack-go/slack/block_conv.go +++ b/vendor/github.com/slack-go/slack/block_conv.go @@ -65,6 +65,8 @@ func (b *Blocks) UnmarshalJSON(data []byte) error { block = &ImageBlock{} case "input": block = &InputBlock{} + case "rich_text": + block = &RichTextBlock{} case "section": block = &SectionBlock{} default: diff --git a/vendor/github.com/slack-go/slack/block_element.go b/vendor/github.com/slack-go/slack/block_element.go index bf068440..21abb018 100644 --- a/vendor/github.com/slack-go/slack/block_element.go +++ b/vendor/github.com/slack-go/slack/block_element.go @@ -389,13 +389,18 @@ func NewTimePickerBlockElement(actionID string) 
*TimePickerBlockElement { // // More Information: https://api.slack.com/reference/block-kit/block-elements#input type PlainTextInputBlockElement struct { - Type MessageElementType `json:"type"` - ActionID string `json:"action_id,omitempty"` - Placeholder *TextBlockObject `json:"placeholder,omitempty"` - InitialValue string `json:"initial_value,omitempty"` - Multiline bool `json:"multiline,omitempty"` - MinLength int `json:"min_length,omitempty"` - MaxLength int `json:"max_length,omitempty"` + Type MessageElementType `json:"type"` + ActionID string `json:"action_id,omitempty"` + Placeholder *TextBlockObject `json:"placeholder,omitempty"` + InitialValue string `json:"initial_value,omitempty"` + Multiline bool `json:"multiline,omitempty"` + MinLength int `json:"min_length,omitempty"` + MaxLength int `json:"max_length,omitempty"` + DispatchActionConfig *DispatchActionConfig `json:"dispatch_action_config,omitempty"` +} + +type DispatchActionConfig struct { + TriggerActionsOn []string `json:"trigger_actions_on,omitempty"` } // ElementType returns the type of the Element diff --git a/vendor/github.com/slack-go/slack/block_rich_text.go b/vendor/github.com/slack-go/slack/block_rich_text.go new file mode 100644 index 00000000..281db213 --- /dev/null +++ b/vendor/github.com/slack-go/slack/block_rich_text.go @@ -0,0 +1,383 @@ +package slack + +import ( + "encoding/json" +) + +// RichTextBlock defines a new block of type rich_text. +// More Information: https://api.slack.com/changelog/2019-09-what-they-see-is-what-you-get-and-more-and-less +type RichTextBlock struct { + Type MessageBlockType `json:"type"` + BlockID string `json:"block_id,omitempty"` + Elements []RichTextElement `json:"elements"` +} + +func (b RichTextBlock) BlockType() MessageBlockType { + return b.Type +} + +func (e *RichTextBlock) UnmarshalJSON(b []byte) error { + var raw struct { + Type MessageBlockType `json:"type"` + BlockID string `json:"block_id"` + RawElements []json.RawMessage `json:"elements"` + } + if string(b) == "{}" { + return nil + } + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + elems := make([]RichTextElement, 0, len(raw.RawElements)) + for _, r := range raw.RawElements { + var s struct { + Type RichTextElementType `json:"type"` + } + if err := json.Unmarshal(r, &s); err != nil { + return err + } + var elem RichTextElement + switch s.Type { + case RTESection: + elem = &RichTextSection{} + default: + elems = append(elems, &RichTextUnknown{ + Type: s.Type, + Raw: string(r), + }) + continue + } + if err := json.Unmarshal(r, &elem); err != nil { + return err + } + elems = append(elems, elem) + } + *e = RichTextBlock{ + Type: raw.Type, + BlockID: raw.BlockID, + Elements: elems, + } + return nil +} + +// NewRichTextBlock returns a new instance of RichText Block. 
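The block_element.go hunk above adds `DispatchActionConfig` to plain-text inputs so a `block_actions` payload can fire while the user is still typing. A sketch of wiring it up; the placeholder text and action ID are made up, and the trigger value follows Slack's documented `trigger_actions_on` options ("on_enter_pressed", "on_character_entered"):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/slack-go/slack"
)

func main() {
	input := slack.NewPlainTextInputBlockElement(
		slack.NewTextBlockObject(slack.PlainTextType, "Search query", false, false),
		"search_query",
	)
	// New in this change: ask Slack to dispatch an action on every keystroke.
	input.DispatchActionConfig = &slack.DispatchActionConfig{
		TriggerActionsOn: []string{"on_character_entered"},
	}

	out, _ := json.Marshal(input)
	fmt.Println(string(out)) // includes "dispatch_action_config":{"trigger_actions_on":["on_character_entered"]}
}
```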
+func NewRichTextBlock(blockID string, elements ...RichTextElement) *RichTextBlock { + return &RichTextBlock{ + Type: MBTRichText, + BlockID: blockID, + Elements: elements, + } +} + +type RichTextElementType string + +type RichTextElement interface { + RichTextElementType() RichTextElementType +} + +const ( + RTEList RichTextElementType = "rich_text_list" + RTEPreformatted RichTextElementType = "rich_text_preformatted" + RTEQuote RichTextElementType = "rich_text_quote" + RTESection RichTextElementType = "rich_text_section" + RTEUnknown RichTextElementType = "rich_text_unknown" +) + +type RichTextUnknown struct { + Type RichTextElementType + Raw string +} + +func (u RichTextUnknown) RichTextElementType() RichTextElementType { + return u.Type +} + +type RichTextSection struct { + Type RichTextElementType `json:"type"` + Elements []RichTextSectionElement `json:"elements"` +} + +// ElementType returns the type of the Element +func (s RichTextSection) RichTextElementType() RichTextElementType { + return s.Type +} + +func (e *RichTextSection) UnmarshalJSON(b []byte) error { + var raw struct { + RawElements []json.RawMessage `json:"elements"` + } + if string(b) == "{}" { + return nil + } + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + elems := make([]RichTextSectionElement, 0, len(raw.RawElements)) + for _, r := range raw.RawElements { + var s struct { + Type RichTextSectionElementType `json:"type"` + } + if err := json.Unmarshal(r, &s); err != nil { + return err + } + var elem RichTextSectionElement + switch s.Type { + case RTSEText: + elem = &RichTextSectionTextElement{} + case RTSEChannel: + elem = &RichTextSectionChannelElement{} + case RTSEUser: + elem = &RichTextSectionUserElement{} + case RTSEEmoji: + elem = &RichTextSectionEmojiElement{} + case RTSELink: + elem = &RichTextSectionLinkElement{} + case RTSETeam: + elem = &RichTextSectionTeamElement{} + case RTSEUserGroup: + elem = &RichTextSectionUserGroupElement{} + case RTSEDate: + elem = &RichTextSectionDateElement{} + case RTSEBroadcast: + elem = &RichTextSectionBroadcastElement{} + case RTSEColor: + elem = &RichTextSectionColorElement{} + default: + elems = append(elems, &RichTextSectionUnknownElement{ + Type: s.Type, + Raw: string(r), + }) + continue + } + if err := json.Unmarshal(r, elem); err != nil { + return err + } + elems = append(elems, elem) + } + *e = RichTextSection{ + Type: RTESection, + Elements: elems, + } + return nil +} + +// NewRichTextSectionBlockElement . 
+func NewRichTextSection(elements ...RichTextSectionElement) *RichTextSection { + return &RichTextSection{ + Type: RTESection, + Elements: elements, + } +} + +type RichTextSectionElementType string + +const ( + RTSEBroadcast RichTextSectionElementType = "broadcast" + RTSEChannel RichTextSectionElementType = "channel" + RTSEColor RichTextSectionElementType = "color" + RTSEDate RichTextSectionElementType = "date" + RTSEEmoji RichTextSectionElementType = "emoji" + RTSELink RichTextSectionElementType = "link" + RTSETeam RichTextSectionElementType = "team" + RTSEText RichTextSectionElementType = "text" + RTSEUser RichTextSectionElementType = "user" + RTSEUserGroup RichTextSectionElementType = "usergroup" + + RTSEUnknown RichTextSectionElementType = "unknown" +) + +type RichTextSectionElement interface { + RichTextSectionElementType() RichTextSectionElementType +} + +type RichTextSectionTextStyle struct { + Bold bool `json:"bold,omitempty"` + Italic bool `json:"italic,omitempty"` + Strike bool `json:"strike,omitempty"` + Code bool `json:"code,omitempty"` +} + +type RichTextSectionTextElement struct { + Type RichTextSectionElementType `json:"type"` + Text string `json:"text"` + Style *RichTextSectionTextStyle `json:"style,omitempty"` +} + +func (r RichTextSectionTextElement) RichTextSectionElementType() RichTextSectionElementType { + return r.Type +} + +func NewRichTextSectionTextElement(text string, style *RichTextSectionTextStyle) *RichTextSectionTextElement { + return &RichTextSectionTextElement{ + Type: RTSEText, + Text: text, + Style: style, + } +} + +type RichTextSectionChannelElement struct { + Type RichTextSectionElementType `json:"type"` + ChannelID string `json:"channel_id"` + Style *RichTextSectionTextStyle `json:"style,omitempty"` +} + +func (r RichTextSectionChannelElement) RichTextSectionElementType() RichTextSectionElementType { + return r.Type +} + +func NewRichTextSectionChannelElement(channelID string, style *RichTextSectionTextStyle) *RichTextSectionChannelElement { + return &RichTextSectionChannelElement{ + Type: RTSEText, + ChannelID: channelID, + Style: style, + } +} + +type RichTextSectionUserElement struct { + Type RichTextSectionElementType `json:"type"` + UserID string `json:"user_id"` + Style *RichTextSectionTextStyle `json:"style,omitempty"` +} + +func (r RichTextSectionUserElement) RichTextSectionElementType() RichTextSectionElementType { + return r.Type +} + +func NewRichTextSectionUserElement(userID string, style *RichTextSectionTextStyle) *RichTextSectionUserElement { + return &RichTextSectionUserElement{ + Type: RTSEUser, + UserID: userID, + Style: style, + } +} + +type RichTextSectionEmojiElement struct { + Type RichTextSectionElementType `json:"type"` + Name string `json:"name"` + SkinTone int `json:"skin_tone"` + Style *RichTextSectionTextStyle `json:"style,omitempty"` +} + +func (r RichTextSectionEmojiElement) RichTextSectionElementType() RichTextSectionElementType { + return r.Type +} + +func NewRichTextSectionEmojiElement(name string, skinTone int, style *RichTextSectionTextStyle) *RichTextSectionEmojiElement { + return &RichTextSectionEmojiElement{ + Type: RTSEEmoji, + Name: name, + SkinTone: skinTone, + Style: style, + } +} + +type RichTextSectionLinkElement struct { + Type RichTextSectionElementType `json:"type"` + URL string `json:"url"` + Text string `json:"text"` + Style *RichTextSectionTextStyle `json:"style,omitempty"` +} + +func (r RichTextSectionLinkElement) RichTextSectionElementType() RichTextSectionElementType { + return r.Type +} + +func 
NewRichTextSectionLinkElement(url, text string, style *RichTextSectionTextStyle) *RichTextSectionLinkElement { + return &RichTextSectionLinkElement{ + Type: RTSELink, + URL: url, + Text: text, + Style: style, + } +} + +type RichTextSectionTeamElement struct { + Type RichTextSectionElementType `json:"type"` + TeamID string `json:"team_id"` + Style *RichTextSectionTextStyle `json:"style.omitempty"` +} + +func (r RichTextSectionTeamElement) RichTextSectionElementType() RichTextSectionElementType { + return r.Type +} + +func NewRichTextSectionTeamElement(teamID string, style *RichTextSectionTextStyle) *RichTextSectionTeamElement { + return &RichTextSectionTeamElement{ + Type: RTSETeam, + TeamID: teamID, + Style: style, + } +} + +type RichTextSectionUserGroupElement struct { + Type RichTextSectionElementType `json:"type"` + UsergroupID string `json:"usergroup_id"` +} + +func (r RichTextSectionUserGroupElement) RichTextSectionElementType() RichTextSectionElementType { + return r.Type +} + +func NewRichTextSectionUserGroupElement(usergroupID string) *RichTextSectionUserGroupElement { + return &RichTextSectionUserGroupElement{ + Type: RTSEUserGroup, + UsergroupID: usergroupID, + } +} + +type RichTextSectionDateElement struct { + Type RichTextSectionElementType `json:"type"` + Timestamp string `json:"timestamp"` +} + +func (r RichTextSectionDateElement) RichTextSectionElementType() RichTextSectionElementType { + return r.Type +} + +func NewRichTextSectionDateElement(timestamp string) *RichTextSectionDateElement { + return &RichTextSectionDateElement{ + Type: RTSEDate, + Timestamp: timestamp, + } +} + +type RichTextSectionBroadcastElement struct { + Type RichTextSectionElementType `json:"type"` + Range string `json:"range"` +} + +func (r RichTextSectionBroadcastElement) RichTextSectionElementType() RichTextSectionElementType { + return r.Type +} + +func NewRichTextSectionBroadcastElement(rangeStr string) *RichTextSectionBroadcastElement { + return &RichTextSectionBroadcastElement{ + Type: RTSEBroadcast, + Range: rangeStr, + } +} + +type RichTextSectionColorElement struct { + Type RichTextSectionElementType `json:"type"` + Value string `json:"value"` +} + +func (r RichTextSectionColorElement) RichTextSectionElementType() RichTextSectionElementType { + return r.Type +} + +func NewRichTextSectionColorElement(value string) *RichTextSectionColorElement { + return &RichTextSectionColorElement{ + Type: RTSEColor, + Value: value, + } +} + +type RichTextSectionUnknownElement struct { + Type RichTextSectionElementType `json:"type"` + Raw string +} + +func (r RichTextSectionUnknownElement) RichTextSectionElementType() RichTextSectionElementType { + return r.Type +} diff --git a/vendor/github.com/slack-go/slack/chat.go b/vendor/github.com/slack-go/slack/chat.go index 2a1e452e..493b65b6 100644 --- a/vendor/github.com/slack-go/slack/chat.go +++ b/vendor/github.com/slack-go/slack/chat.go @@ -188,7 +188,12 @@ func (api *Client) UpdateMessageContext(ctx context.Context, channelID, timestam // UnfurlMessage unfurls a message in a channel func (api *Client) UnfurlMessage(channelID, timestamp string, unfurls map[string]Attachment, options ...MsgOption) (string, string, string, error) { - return api.SendMessageContext(context.Background(), channelID, MsgOptionUnfurl(timestamp, unfurls), MsgOptionCompose(options...)) + return api.UnfurlMessageContext(context.Background(), channelID, timestamp, unfurls, options...) 
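Taken together, the new block_rich_text.go file above gives the library a typed model for `rich_text` blocks: constructors for the block, its sections and the individual section elements, plus `RichTextUnknown` and `RichTextSectionUnknownElement` fallbacks so unmarshalling does not fail on element types the package does not know yet. A small sketch that assembles one block and prints its wire form; the block ID, user ID and text are made up:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/slack-go/slack"
)

func main() {
	section := slack.NewRichTextSection(
		slack.NewRichTextSectionTextElement("Deploy finished", &slack.RichTextSectionTextStyle{Bold: true}),
		slack.NewRichTextSectionTextElement(", thanks ", nil),
		slack.NewRichTextSectionUserElement("U0123456", nil),
		slack.NewRichTextSectionEmojiElement("tada", 0, nil),
	)
	block := slack.NewRichTextBlock("deploy-note", section)

	out, _ := json.MarshalIndent(block, "", "  ")
	fmt.Println(string(out)) // "type":"rich_text" with a rich_text_section of typed elements
}
```

Because `RichTextBlock` implements the `Block` interface, it can also be passed to `slack.MsgOptionBlocks` like any other block type.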
+} + +// UnfurlMessageContext unfurls a message in a channel with a custom context +func (api *Client) UnfurlMessageContext(ctx context.Context, channelID, timestamp string, unfurls map[string]Attachment, options ...MsgOption) (string, string, string, error) { + return api.SendMessageContext(ctx, channelID, MsgOptionUnfurl(timestamp, unfurls), MsgOptionCompose(options...)) } // UnfurlMessageWithAuthURL sends an unfurl request containing an diff --git a/vendor/github.com/slack-go/slack/messages.go b/vendor/github.com/slack-go/slack/messages.go index 999df5d9..2f05f6d7 100644 --- a/vendor/github.com/slack-go/slack/messages.go +++ b/vendor/github.com/slack-go/slack/messages.go @@ -19,6 +19,38 @@ type Message struct { PreviousMessage *Msg `json:"previous_message,omitempty"` } +// Msg SubTypes (https://api.slack.com/events/message) +const ( + MsgSubTypeBotMessage = "bot_message" // [Events API, RTM] A message was posted by an integration + MsgSubTypeMeMessage = "me_message" // [Events API, RTM] A /me message was sent + MsgSubTypeMessageChanged = "message_changed" // [Events API, RTM] A message was changed + MsgSubTypeMessageDeleted = "message_deleted" // [Events API, RTM] A message was deleted + MsgSubTypeMessageReplied = "message_replied" // [Events API, RTM] A message thread received a reply + MsgSubTypeReplyBroadcast = "reply_broadcast" // @Deprecated (No longer served) A message thread's reply was broadcast to a channel + MsgSubTypeThreadBroadcast = "thread_broadcast" // [Events API, RTM] A message thread's reply was broadcast to a channel + MsgSubTypeChannelJoin = "channel_join" // [Events API, RTM] A member joined a channel + MsgSubTypeChannelLeave = "channel_leave" // [Events API, RTM] A member left a channel + MsgSubTypeChannelTopic = "channel_topic" // [Events API, RTM] A channel topic was updated + MsgSubTypeChannelPurpose = "channel_purpose" // [Events API, RTM] A channel purpose was updated + MsgSubTypeChannelName = "channel_name" // [Events API, RTM] A channel was renamed + MsgSubTypeChannelArchive = "channel_archive" // [Events API, RTM] A channel was archived + MsgSubTypeChannelUnarchive = "channel_unarchive" // [Events API, RTM] A channel was unarchived + MsgSubTypeGroupJoin = "group_join" // [RTM] A member joined a group + MsgSubTypeGroupLeave = "group_leave" // [RTM] A member left a group + MsgSubTypeGroupTopic = "group_topic" // [RTM] A group topic was updated + MsgSubTypeGroupPurpose = "group_purpose" // [RTM] A group purpose was updated + MsgSubTypeGroupName = "group_name" // [RTM] A group was renamed + MsgSubTypeGroupArchive = "group_archive" // [RTM] A group was archived + MsgSubTypeGroupUnarchive = "group_unarchive" // [RTM] A group was unarchived + MsgSubTypeFileShare = "file_share" // [Events API, RTM] A file was shared into a channel + MsgSubTypeFileComment = "file_comment" // [RTM] A comment was added to a file + MsgSubTypeGileMention = "file_mention" // [RTM] A file was mentioned in a channel + MsgSubTypePinnedItem = "pinned_item" // [RTM] An item was pinned in a channel + MsgSubTypeUnpinnedItem = "unpinned_item" // [RTM] An item was unpinned from a channel + MsgSubTypeEkmAccessDenied = "ekm_access_denied" // [Events API, RTM] Message content redacted due to Enterprise Key Management (EKM) + MsgSubTypeChannelPostingPermissions = "channel_posting_permissions" // [Events API, RTM] The posting permissions for a channel changed +) + // Msg contains information about a slack message type Msg struct { // Basic Message diff --git a/vendor/github.com/slack-go/slack/misc.go 
b/vendor/github.com/slack-go/slack/misc.go index 8c50305b..5272e7c4 100644 --- a/vendor/github.com/slack-go/slack/misc.go +++ b/vendor/github.com/slack-go/slack/misc.go @@ -41,9 +41,17 @@ func (t SlackResponse) Err() error { return nil } - return errors.New(t.Error) + return SlackErrorResponse{Err: t.Error, ResponseMetadata: t.ResponseMetadata} } +// SlackErrorResponse brings along the metadata of errors returned by the Slack API. +type SlackErrorResponse struct { + Err string + ResponseMetadata ResponseMetadata +} + +func (r SlackErrorResponse) Error() string { return r.Err } + // RateLimitedError represents the rate limit respond from slack type RateLimitedError struct { RetryAfter time.Duration diff --git a/vendor/github.com/slack-go/slack/oauth.go b/vendor/github.com/slack-go/slack/oauth.go index 707ccc6b..d9aca5f3 100644 --- a/vendor/github.com/slack-go/slack/oauth.go +++ b/vendor/github.com/slack-go/slack/oauth.go @@ -42,6 +42,8 @@ type OAuthV2Response struct { IncomingWebhook OAuthResponseIncomingWebhook `json:"incoming_webhook"` Enterprise OAuthV2ResponseEnterprise `json:"enterprise"` AuthedUser OAuthV2ResponseAuthedUser `json:"authed_user"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` SlackResponse } @@ -132,3 +134,23 @@ func GetOAuthV2ResponseContext(ctx context.Context, client httpClient, clientID, } return response, response.Err() } + +// RefreshOAuthV2AccessContext with a context, gets a V2 OAuth access token response +func RefreshOAuthV2Token(client httpClient, clientID, clientSecret, refreshToken string) (resp *OAuthV2Response, err error) { + return RefreshOAuthV2TokenContext(context.Background(), client, clientID, clientSecret, refreshToken) +} + +// RefreshOAuthV2AccessContext with a context, gets a V2 OAuth access token response +func RefreshOAuthV2TokenContext(ctx context.Context, client httpClient, clientID, clientSecret, refreshToken string) (resp *OAuthV2Response, err error) { + values := url.Values{ + "client_id": {clientID}, + "client_secret": {clientSecret}, + "refresh_token": {refreshToken}, + "grant_type": {"refresh_token"}, + } + response := &OAuthV2Response{} + if err = postForm(ctx, client, APIURL+"oauth.v2.access", values, response, discard{}); err != nil { + return nil, err + } + return response, response.Err() +} diff --git a/vendor/github.com/slack-go/slack/users.go b/vendor/github.com/slack-go/slack/users.go index 3696e37f..87311569 100644 --- a/vendor/github.com/slack-go/slack/users.go +++ b/vendor/github.com/slack-go/slack/users.go @@ -469,9 +469,7 @@ func (api *Client) SetUserPhoto(image string, params UserSetPhotoParams) error { // SetUserPhotoContext changes the currently authenticated user's profile image using a custom context func (api *Client) SetUserPhotoContext(ctx context.Context, image string, params UserSetPhotoParams) (err error) { response := &SlackResponse{} - values := url.Values{ - "token": {api.token}, - } + values := url.Values{} if params.CropX != DEFAULT_USER_PHOTO_CROP_X { values.Add("crop_x", strconv.Itoa(params.CropX)) } diff --git a/vendor/github.com/slack-go/slack/webhooks.go b/vendor/github.com/slack-go/slack/webhooks.go index 39fff441..97346e1c 100644 --- a/vendor/github.com/slack-go/slack/webhooks.go +++ b/vendor/github.com/slack-go/slack/webhooks.go @@ -15,6 +15,9 @@ type WebhookMessage struct { Attachments []Attachment `json:"attachments,omitempty"` Parse string `json:"parse,omitempty"` Blocks *Blocks `json:"blocks,omitempty"` + ResponseType string `json:"response_type,omitempty"` + 
ReplaceOriginal bool `json:"replace_original,omitempty"` + DeleteOriginal bool `json:"delete_original,omitempty"` } func PostWebhook(url string, msg *WebhookMessage) error { |
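The final webhooks.go hunk (truncated above) adds `ResponseType`, `ReplaceOriginal` and `DeleteOriginal` to `WebhookMessage`, the fields that slash-command and interaction `response_url`s understand. A sketch of answering a slash command in place; the response URL is a placeholder and "ephemeral"/"in_channel" are Slack's documented response types:

```go
package main

import (
	"log"

	"github.com/slack-go/slack"
)

func main() {
	// responseURL would normally come from the slash-command or interaction payload.
	responseURL := "https://hooks.slack.com/commands/T000/B000/XXXX"

	msg := &slack.WebhookMessage{
		Text:            "Working on it...",
		ResponseType:    "ephemeral", // only the invoking user sees this
		ReplaceOriginal: true,        // overwrite the previous response at this URL
	}
	if err := slack.PostWebhook(responseURL, msg); err != nil {
		log.Fatal(err)
	}
}
```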