Diffstat (limited to 'vendor/github.com')
108 files changed, 9119 insertions, 1220 deletions
diff --git a/vendor/github.com/bwmarrin/discordgo/.travis.yml b/vendor/github.com/bwmarrin/discordgo/.travis.yml index 5d9cea3e..e80d490b 100644 --- a/vendor/github.com/bwmarrin/discordgo/.travis.yml +++ b/vendor/github.com/bwmarrin/discordgo/.travis.yml @@ -4,6 +4,8 @@ go: - 1.14.x - 1.15.x - 1.16.x + - 1.17.x + - 1.18.x env: - GO111MODULE=on install: diff --git a/vendor/github.com/bwmarrin/discordgo/components.go b/vendor/github.com/bwmarrin/discordgo/components.go index 00cbbf19..6ee4e289 100644 --- a/vendor/github.com/bwmarrin/discordgo/components.go +++ b/vendor/github.com/bwmarrin/discordgo/components.go @@ -70,7 +70,7 @@ type ActionsRow struct { func (r ActionsRow) MarshalJSON() ([]byte, error) { type actionsRow ActionsRow - return json.Marshal(struct { + return Marshal(struct { actionsRow Type ComponentType `json:"type"` }{ @@ -145,7 +145,7 @@ func (b Button) MarshalJSON() ([]byte, error) { b.Style = PrimaryButton } - return json.Marshal(struct { + return Marshal(struct { button Type ComponentType `json:"type"` }{ @@ -192,7 +192,7 @@ func (SelectMenu) Type() ComponentType { func (m SelectMenu) MarshalJSON() ([]byte, error) { type selectMenu SelectMenu - return json.Marshal(struct { + return Marshal(struct { selectMenu Type ComponentType `json:"type"` }{ @@ -208,7 +208,7 @@ type TextInput struct { Style TextInputStyle `json:"style"` Placeholder string `json:"placeholder,omitempty"` Value string `json:"value,omitempty"` - Required bool `json:"required,omitempty"` + Required bool `json:"required"` MinLength int `json:"min_length,omitempty"` MaxLength int `json:"max_length,omitempty"` } @@ -222,7 +222,7 @@ func (TextInput) Type() ComponentType { func (m TextInput) MarshalJSON() ([]byte, error) { type inputText TextInput - return json.Marshal(struct { + return Marshal(struct { inputText Type ComponentType `json:"type"` }{ diff --git a/vendor/github.com/bwmarrin/discordgo/discord.go b/vendor/github.com/bwmarrin/discordgo/discord.go index 74f4190a..db853549 100644 --- a/vendor/github.com/bwmarrin/discordgo/discord.go +++ b/vendor/github.com/bwmarrin/discordgo/discord.go @@ -20,7 +20,7 @@ import ( ) // VERSION of DiscordGo, follows Semantic Versioning. (http://semver.org/) -const VERSION = "0.24.0" +const VERSION = "0.25.0" // New creates a new Discord session with provided token. 
// If the token is for a bot, it must be prefixed with "Bot " @@ -36,6 +36,7 @@ func New(token string) (s *Session, err error) { StateEnabled: true, Compress: true, ShouldReconnectOnError: true, + ShouldRetryOnRateLimit: true, ShardID: 0, ShardCount: 1, MaxRestRetries: 3, @@ -49,7 +50,6 @@ func New(token string) (s *Session, err error) { // These can be modified prior to calling Open() s.Identify.Compress = true s.Identify.LargeThreshold = 250 - s.Identify.GuildSubscriptions = true s.Identify.Properties.OS = runtime.GOOS s.Identify.Properties.Browser = "DiscordGo v" + VERSION s.Identify.Intents = IntentsAllWithoutPrivileged diff --git a/vendor/github.com/bwmarrin/discordgo/endpoints.go b/vendor/github.com/bwmarrin/discordgo/endpoints.go index d39a175b..f5822da6 100644 --- a/vendor/github.com/bwmarrin/discordgo/endpoints.go +++ b/vendor/github.com/bwmarrin/discordgo/endpoints.go @@ -23,15 +23,16 @@ var ( EndpointSmActive = EndpointSm + "active.json" EndpointSmUpcoming = EndpointSm + "upcoming.json" - EndpointDiscord = "https://discord.com/" - EndpointAPI = EndpointDiscord + "api/v" + APIVersion + "/" - EndpointGuilds = EndpointAPI + "guilds/" - EndpointChannels = EndpointAPI + "channels/" - EndpointUsers = EndpointAPI + "users/" - EndpointGateway = EndpointAPI + "gateway" - EndpointGatewayBot = EndpointGateway + "/bot" - EndpointWebhooks = EndpointAPI + "webhooks/" - EndpointStickers = EndpointAPI + "stickers/" + EndpointDiscord = "https://discord.com/" + EndpointAPI = EndpointDiscord + "api/v" + APIVersion + "/" + EndpointGuilds = EndpointAPI + "guilds/" + EndpointChannels = EndpointAPI + "channels/" + EndpointUsers = EndpointAPI + "users/" + EndpointGateway = EndpointAPI + "gateway" + EndpointGatewayBot = EndpointGateway + "/bot" + EndpointWebhooks = EndpointAPI + "webhooks/" + EndpointStickers = EndpointAPI + "stickers/" + EndpointStageInstances = EndpointAPI + "stage-instances" EndpointCDN = "https://cdn.discordapp.com/" EndpointCDNAttachments = EndpointCDN + "attachments/" @@ -72,6 +73,7 @@ var ( EndpointGuildPreview = func(gID string) string { return EndpointGuilds + gID + "/preview" } EndpointGuildChannels = func(gID string) string { return EndpointGuilds + gID + "/channels" } EndpointGuildMembers = func(gID string) string { return EndpointGuilds + gID + "/members" } + EndpointGuildMembersSearch = func(gID string) string { return EndpointGuildMembers(gID) + "/search" } EndpointGuildMember = func(gID, uID string) string { return EndpointGuilds + gID + "/members/" + uID } EndpointGuildMemberRole = func(gID, uID, rID string) string { return EndpointGuilds + gID + "/members/" + uID + "/roles/" + rID } EndpointGuildBans = func(gID string) string { return EndpointGuilds + gID + "/bans" } @@ -94,6 +96,7 @@ var ( EndpointGuildBanner = func(gID, hash string) string { return EndpointCDNBanners + gID + "/" + hash + ".png" } EndpointGuildStickers = func(gID string) string { return EndpointGuilds + gID + "/stickers" } EndpointGuildSticker = func(gID, sID string) string { return EndpointGuilds + gID + "/stickers/" + sID } + EndpointStageInstance = func(cID string) string { return EndpointStageInstances + "/" + cID } EndpointGuildScheduledEvents = func(gID string) string { return EndpointGuilds + gID + "/scheduled-events" } EndpointGuildScheduledEvent = func(gID, eID string) string { return EndpointGuilds + gID + "/scheduled-events/" + eID } EndpointGuildScheduledEventUsers = func(gID, eID string) string { return EndpointGuildScheduledEvent(gID, eID) + "/users" } diff --git 
a/vendor/github.com/bwmarrin/discordgo/eventhandlers.go b/vendor/github.com/bwmarrin/discordgo/eventhandlers.go index 18d6248a..d0e382f8 100644 --- a/vendor/github.com/bwmarrin/discordgo/eventhandlers.go +++ b/vendor/github.com/bwmarrin/discordgo/eventhandlers.go @@ -7,62 +7,67 @@ package discordgo // Event type values are used to match the events returned by Discord. // EventTypes surrounded by __ are synthetic and are internal to DiscordGo. const ( - channelCreateEventType = "CHANNEL_CREATE" - channelDeleteEventType = "CHANNEL_DELETE" - channelPinsUpdateEventType = "CHANNEL_PINS_UPDATE" - channelUpdateEventType = "CHANNEL_UPDATE" - connectEventType = "__CONNECT__" - disconnectEventType = "__DISCONNECT__" - eventEventType = "__EVENT__" - guildBanAddEventType = "GUILD_BAN_ADD" - guildBanRemoveEventType = "GUILD_BAN_REMOVE" - guildCreateEventType = "GUILD_CREATE" - guildDeleteEventType = "GUILD_DELETE" - guildEmojisUpdateEventType = "GUILD_EMOJIS_UPDATE" - guildIntegrationsUpdateEventType = "GUILD_INTEGRATIONS_UPDATE" - guildMemberAddEventType = "GUILD_MEMBER_ADD" - guildMemberRemoveEventType = "GUILD_MEMBER_REMOVE" - guildMemberUpdateEventType = "GUILD_MEMBER_UPDATE" - guildMembersChunkEventType = "GUILD_MEMBERS_CHUNK" - guildRoleCreateEventType = "GUILD_ROLE_CREATE" - guildRoleDeleteEventType = "GUILD_ROLE_DELETE" - guildRoleUpdateEventType = "GUILD_ROLE_UPDATE" - guildUpdateEventType = "GUILD_UPDATE" - guildScheduledEventCreateEventType = "GUILD_SCHEDULED_EVENT_CREATE" - guildScheduledEventUpdateEventType = "GUILD_SCHEDULED_EVENT_UPDATE" - guildScheduledEventDeleteEventType = "GUILD_SCHEDULED_EVENT_DELETE" - interactionCreateEventType = "INTERACTION_CREATE" - inviteCreateEventType = "INVITE_CREATE" - inviteDeleteEventType = "INVITE_DELETE" - messageAckEventType = "MESSAGE_ACK" - messageCreateEventType = "MESSAGE_CREATE" - messageDeleteEventType = "MESSAGE_DELETE" - messageDeleteBulkEventType = "MESSAGE_DELETE_BULK" - messageReactionAddEventType = "MESSAGE_REACTION_ADD" - messageReactionRemoveEventType = "MESSAGE_REACTION_REMOVE" - messageReactionRemoveAllEventType = "MESSAGE_REACTION_REMOVE_ALL" - messageUpdateEventType = "MESSAGE_UPDATE" - presenceUpdateEventType = "PRESENCE_UPDATE" - presencesReplaceEventType = "PRESENCES_REPLACE" - rateLimitEventType = "__RATE_LIMIT__" - readyEventType = "READY" - relationshipAddEventType = "RELATIONSHIP_ADD" - relationshipRemoveEventType = "RELATIONSHIP_REMOVE" - resumedEventType = "RESUMED" - threadCreateEventType = "THREAD_CREATE" - threadDeleteEventType = "THREAD_DELETE" - threadListSyncEventType = "THREAD_LIST_SYNC" - threadMemberUpdateEventType = "THREAD_MEMBER_UPDATE" - threadMembersUpdateEventType = "THREAD_MEMBERS_UPDATE" - threadUpdateEventType = "THREAD_UPDATE" - typingStartEventType = "TYPING_START" - userGuildSettingsUpdateEventType = "USER_GUILD_SETTINGS_UPDATE" - userNoteUpdateEventType = "USER_NOTE_UPDATE" - userSettingsUpdateEventType = "USER_SETTINGS_UPDATE" - userUpdateEventType = "USER_UPDATE" - voiceServerUpdateEventType = "VOICE_SERVER_UPDATE" - voiceStateUpdateEventType = "VOICE_STATE_UPDATE" - webhooksUpdateEventType = "WEBHOOKS_UPDATE" + channelCreateEventType = "CHANNEL_CREATE" + channelDeleteEventType = "CHANNEL_DELETE" + channelPinsUpdateEventType = "CHANNEL_PINS_UPDATE" + channelUpdateEventType = "CHANNEL_UPDATE" + connectEventType = "__CONNECT__" + disconnectEventType = "__DISCONNECT__" + eventEventType = "__EVENT__" + guildBanAddEventType = "GUILD_BAN_ADD" + guildBanRemoveEventType = "GUILD_BAN_REMOVE" + 
guildCreateEventType = "GUILD_CREATE" + guildDeleteEventType = "GUILD_DELETE" + guildEmojisUpdateEventType = "GUILD_EMOJIS_UPDATE" + guildIntegrationsUpdateEventType = "GUILD_INTEGRATIONS_UPDATE" + guildMemberAddEventType = "GUILD_MEMBER_ADD" + guildMemberRemoveEventType = "GUILD_MEMBER_REMOVE" + guildMemberUpdateEventType = "GUILD_MEMBER_UPDATE" + guildMembersChunkEventType = "GUILD_MEMBERS_CHUNK" + guildRoleCreateEventType = "GUILD_ROLE_CREATE" + guildRoleDeleteEventType = "GUILD_ROLE_DELETE" + guildRoleUpdateEventType = "GUILD_ROLE_UPDATE" + guildStageInstanceCreateEventType = "STAGE_INSTANCE_CREATE" + guildStageInstanceUpdateEventType = "STAGE_INSTANCE_UPDATE" + guildStageInstanceDeleteEventType = "STAGE_INSTANCE_DELETE" + guildScheduledEventCreateEventType = "GUILD_SCHEDULED_EVENT_CREATE" + guildScheduledEventDeleteEventType = "GUILD_SCHEDULED_EVENT_DELETE" + guildScheduledEventUpdateEventType = "GUILD_SCHEDULED_EVENT_UPDATE" + guildScheduledEventUserAddEventType = "GUILD_SCHEDULED_EVENT_USER_ADD" + guildScheduledEventUserRemoveEventType = "GUILD_SCHEDULED_EVENT_USER_REMOVE" + guildUpdateEventType = "GUILD_UPDATE" + interactionCreateEventType = "INTERACTION_CREATE" + inviteCreateEventType = "INVITE_CREATE" + inviteDeleteEventType = "INVITE_DELETE" + messageAckEventType = "MESSAGE_ACK" + messageCreateEventType = "MESSAGE_CREATE" + messageDeleteEventType = "MESSAGE_DELETE" + messageDeleteBulkEventType = "MESSAGE_DELETE_BULK" + messageReactionAddEventType = "MESSAGE_REACTION_ADD" + messageReactionRemoveEventType = "MESSAGE_REACTION_REMOVE" + messageReactionRemoveAllEventType = "MESSAGE_REACTION_REMOVE_ALL" + messageUpdateEventType = "MESSAGE_UPDATE" + presenceUpdateEventType = "PRESENCE_UPDATE" + presencesReplaceEventType = "PRESENCES_REPLACE" + rateLimitEventType = "__RATE_LIMIT__" + readyEventType = "READY" + relationshipAddEventType = "RELATIONSHIP_ADD" + relationshipRemoveEventType = "RELATIONSHIP_REMOVE" + resumedEventType = "RESUMED" + threadCreateEventType = "THREAD_CREATE" + threadDeleteEventType = "THREAD_DELETE" + threadListSyncEventType = "THREAD_LIST_SYNC" + threadMemberUpdateEventType = "THREAD_MEMBER_UPDATE" + threadMembersUpdateEventType = "THREAD_MEMBERS_UPDATE" + threadUpdateEventType = "THREAD_UPDATE" + typingStartEventType = "TYPING_START" + userGuildSettingsUpdateEventType = "USER_GUILD_SETTINGS_UPDATE" + userNoteUpdateEventType = "USER_NOTE_UPDATE" + userSettingsUpdateEventType = "USER_SETTINGS_UPDATE" + userUpdateEventType = "USER_UPDATE" + voiceServerUpdateEventType = "VOICE_SERVER_UPDATE" + voiceStateUpdateEventType = "VOICE_STATE_UPDATE" + webhooksUpdateEventType = "WEBHOOKS_UPDATE" ) // channelCreateEventHandler is an event handler for ChannelCreate events. @@ -310,66 +315,6 @@ func (eh guildIntegrationsUpdateEventHandler) Handle(s *Session, i interface{}) } } -// guildScheduledEventCreateEventHandler is an event handler for GuildScheduledEventCreate events. -type guildScheduledEventCreateEventHandler func(*Session, *GuildScheduledEventCreate) - -// Type returns the event type for GuildScheduledEventCreate events. -func (eh guildScheduledEventCreateEventHandler) Type() string { - return guildScheduledEventCreateEventType -} - -// New returns a new instance of GuildScheduledEventCreate. -func (eh guildScheduledEventCreateEventHandler) New() interface{} { - return &GuildScheduledEventCreate{} -} - -// Handle is the handler for GuildScheduledEventCreate events. 
-func (eh guildScheduledEventCreateEventHandler) Handle(s *Session, i interface{}) { - if t, ok := i.(*GuildScheduledEventCreate); ok { - eh(s, t) - } -} - -// guildScheduledEventUpdateEventHandler is an event handler for GuildScheduledEventUpdate events. -type guildScheduledEventUpdateEventHandler func(*Session, *GuildScheduledEventUpdate) - -// Type returns the event type for GuildScheduledEventUpdate events. -func (eh guildScheduledEventUpdateEventHandler) Type() string { - return guildScheduledEventUpdateEventType -} - -// New returns a new instance of GuildScheduledEventUpdate. -func (eh guildScheduledEventUpdateEventHandler) New() interface{} { - return &GuildScheduledEventUpdate{} -} - -// Handle is the handler for GuildScheduledEventUpdate events. -func (eh guildScheduledEventUpdateEventHandler) Handle(s *Session, i interface{}) { - if t, ok := i.(*GuildScheduledEventUpdate); ok { - eh(s, t) - } -} - -// guildScheduledEventDeleteEventHandler is an event handler for GuildScheduledEventDelete events. -type guildScheduledEventDeleteEventHandler func(*Session, *GuildScheduledEventDelete) - -// Type returns the event type for GuildScheduledEventDelete events. -func (eh guildScheduledEventDeleteEventHandler) Type() string { - return guildScheduledEventDeleteEventType -} - -// New returns a new instance of GuildScheduledEventDelete. -func (eh guildScheduledEventDeleteEventHandler) New() interface{} { - return &GuildScheduledEventDelete{} -} - -// Handle is the handler for GuildScheduledEventDelete events. -func (eh guildScheduledEventDeleteEventHandler) Handle(s *Session, i interface{}) { - if t, ok := i.(*GuildScheduledEventDelete); ok { - eh(s, t) - } -} - // guildMemberAddEventHandler is an event handler for GuildMemberAdd events. type guildMemberAddEventHandler func(*Session, *GuildMemberAdd) @@ -510,6 +455,166 @@ func (eh guildRoleUpdateEventHandler) Handle(s *Session, i interface{}) { } } +// guildStageInstanceEventCreateHandler is an event handler for StageInstanceEventCreate events. +type guildStageInstanceEventCreateHandler func(*Session, *StageInstanceEventCreate) + +// Type returns the event type for StageInstanceEventCreate events. +func (eh guildStageInstanceEventCreateHandler) Type() string { + return guildStageInstanceCreateEventType +} + +// New returns a new instance of StageInstanceEventCreate. +func (eh guildStageInstanceEventCreateHandler) New() interface{} { + return &StageInstanceEventCreate{} +} + +// Handle is the handler for StageInstanceEventCreate events. +func (eh guildStageInstanceEventCreateHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*StageInstanceEventCreate); ok { + eh(s, t) + } +} + +// guildStageInstanceEventUpdateHandler is an event handler for StageInstanceEventUpdate events. +type guildStageInstanceEventUpdateHandler func(*Session, *StageInstanceEventUpdate) + +// Type returns the event type for StageInstanceEventUpdate events. +func (eh guildStageInstanceEventUpdateHandler) Type() string { + return guildStageInstanceCreateEventType +} + +// New returns a new instance of StageInstanceEventUpdate. +func (eh guildStageInstanceEventUpdateHandler) New() interface{} { + return &StageInstanceEventUpdate{} +} + +// Handle is the handler for StageInstanceEventUpdate events. +func (eh guildStageInstanceEventUpdateHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*StageInstanceEventUpdate); ok { + eh(s, t) + } +} + +// guildStageInstanceEventDeleteHandler is an event handler for StageInstanceEventDelete events. 
+type guildStageInstanceEventDeleteHandler func(*Session, *StageInstanceEventDelete) + +// Type returns the event type for StageInstanceEventDelete events. +func (eh guildStageInstanceEventDeleteHandler) Type() string { + return guildStageInstanceCreateEventType +} + +// New returns a new instance of StageInstanceEventDelete. +func (eh guildStageInstanceEventDeleteHandler) New() interface{} { + return &StageInstanceEventDelete{} +} + +// Handle is the handler for StageInstanceEventDelete events. +func (eh guildStageInstanceEventDeleteHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*StageInstanceEventDelete); ok { + eh(s, t) + } +} + +// guildScheduledEventCreateEventHandler is an event handler for GuildScheduledEventCreate events. +type guildScheduledEventCreateEventHandler func(*Session, *GuildScheduledEventCreate) + +// Type returns the event type for GuildScheduledEventCreate events. +func (eh guildScheduledEventCreateEventHandler) Type() string { + return guildScheduledEventCreateEventType +} + +// New returns a new instance of GuildScheduledEventCreate. +func (eh guildScheduledEventCreateEventHandler) New() interface{} { + return &GuildScheduledEventCreate{} +} + +// Handle is the handler for GuildScheduledEventCreate events. +func (eh guildScheduledEventCreateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*GuildScheduledEventCreate); ok { + eh(s, t) + } +} + +// guildScheduledEventDeleteEventHandler is an event handler for GuildScheduledEventDelete events. +type guildScheduledEventDeleteEventHandler func(*Session, *GuildScheduledEventDelete) + +// Type returns the event type for GuildScheduledEventDelete events. +func (eh guildScheduledEventDeleteEventHandler) Type() string { + return guildScheduledEventDeleteEventType +} + +// New returns a new instance of GuildScheduledEventDelete. +func (eh guildScheduledEventDeleteEventHandler) New() interface{} { + return &GuildScheduledEventDelete{} +} + +// Handle is the handler for GuildScheduledEventDelete events. +func (eh guildScheduledEventDeleteEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*GuildScheduledEventDelete); ok { + eh(s, t) + } +} + +// guildScheduledEventUpdateEventHandler is an event handler for GuildScheduledEventUpdate events. +type guildScheduledEventUpdateEventHandler func(*Session, *GuildScheduledEventUpdate) + +// Type returns the event type for GuildScheduledEventUpdate events. +func (eh guildScheduledEventUpdateEventHandler) Type() string { + return guildScheduledEventUpdateEventType +} + +// New returns a new instance of GuildScheduledEventUpdate. +func (eh guildScheduledEventUpdateEventHandler) New() interface{} { + return &GuildScheduledEventUpdate{} +} + +// Handle is the handler for GuildScheduledEventUpdate events. +func (eh guildScheduledEventUpdateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*GuildScheduledEventUpdate); ok { + eh(s, t) + } +} + +// guildScheduledEventUserAddEventHandler is an event handler for GuildScheduledEventUserAdd events. +type guildScheduledEventUserAddEventHandler func(*Session, *GuildScheduledEventUserAdd) + +// Type returns the event type for GuildScheduledEventUserAdd events. +func (eh guildScheduledEventUserAddEventHandler) Type() string { + return guildScheduledEventUserAddEventType +} + +// New returns a new instance of GuildScheduledEventUserAdd. 
+func (eh guildScheduledEventUserAddEventHandler) New() interface{} { + return &GuildScheduledEventUserAdd{} +} + +// Handle is the handler for GuildScheduledEventUserAdd events. +func (eh guildScheduledEventUserAddEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*GuildScheduledEventUserAdd); ok { + eh(s, t) + } +} + +// guildScheduledEventUserRemoveEventHandler is an event handler for GuildScheduledEventUserRemove events. +type guildScheduledEventUserRemoveEventHandler func(*Session, *GuildScheduledEventUserRemove) + +// Type returns the event type for GuildScheduledEventUserRemove events. +func (eh guildScheduledEventUserRemoveEventHandler) Type() string { + return guildScheduledEventUserRemoveEventType +} + +// New returns a new instance of GuildScheduledEventUserRemove. +func (eh guildScheduledEventUserRemoveEventHandler) New() interface{} { + return &GuildScheduledEventUserRemove{} +} + +// Handle is the handler for GuildScheduledEventUserRemove events. +func (eh guildScheduledEventUserRemoveEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*GuildScheduledEventUserRemove); ok { + eh(s, t) + } +} + // guildUpdateEventHandler is an event handler for GuildUpdate events. type guildUpdateEventHandler func(*Session, *GuildUpdate) @@ -1195,12 +1300,6 @@ func handlerForInterface(handler interface{}) EventHandler { return guildEmojisUpdateEventHandler(v) case func(*Session, *GuildIntegrationsUpdate): return guildIntegrationsUpdateEventHandler(v) - case func(*Session, *GuildScheduledEventCreate): - return guildScheduledEventCreateEventHandler(v) - case func(*Session, *GuildScheduledEventUpdate): - return guildScheduledEventUpdateEventHandler(v) - case func(*Session, *GuildScheduledEventDelete): - return guildScheduledEventDeleteEventHandler(v) case func(*Session, *GuildMemberAdd): return guildMemberAddEventHandler(v) case func(*Session, *GuildMemberRemove): @@ -1215,6 +1314,16 @@ func handlerForInterface(handler interface{}) EventHandler { return guildRoleDeleteEventHandler(v) case func(*Session, *GuildRoleUpdate): return guildRoleUpdateEventHandler(v) + case func(*Session, *GuildScheduledEventCreate): + return guildScheduledEventCreateEventHandler(v) + case func(*Session, *GuildScheduledEventDelete): + return guildScheduledEventDeleteEventHandler(v) + case func(*Session, *GuildScheduledEventUpdate): + return guildScheduledEventUpdateEventHandler(v) + case func(*Session, *GuildScheduledEventUserAdd): + return guildScheduledEventUserAddEventHandler(v) + case func(*Session, *GuildScheduledEventUserRemove): + return guildScheduledEventUserRemoveEventHandler(v) case func(*Session, *GuildUpdate): return guildUpdateEventHandler(v) case func(*Session, *InteractionCreate): @@ -1297,9 +1406,6 @@ func init() { registerInterfaceProvider(guildDeleteEventHandler(nil)) registerInterfaceProvider(guildEmojisUpdateEventHandler(nil)) registerInterfaceProvider(guildIntegrationsUpdateEventHandler(nil)) - registerInterfaceProvider(guildScheduledEventCreateEventHandler(nil)) - registerInterfaceProvider(guildScheduledEventUpdateEventHandler(nil)) - registerInterfaceProvider(guildScheduledEventDeleteEventHandler(nil)) registerInterfaceProvider(guildMemberAddEventHandler(nil)) registerInterfaceProvider(guildMemberRemoveEventHandler(nil)) registerInterfaceProvider(guildMemberUpdateEventHandler(nil)) @@ -1307,6 +1413,11 @@ func init() { registerInterfaceProvider(guildRoleCreateEventHandler(nil)) registerInterfaceProvider(guildRoleDeleteEventHandler(nil)) 
registerInterfaceProvider(guildRoleUpdateEventHandler(nil)) + registerInterfaceProvider(guildScheduledEventCreateEventHandler(nil)) + registerInterfaceProvider(guildScheduledEventDeleteEventHandler(nil)) + registerInterfaceProvider(guildScheduledEventUpdateEventHandler(nil)) + registerInterfaceProvider(guildScheduledEventUserAddEventHandler(nil)) + registerInterfaceProvider(guildScheduledEventUserRemoveEventHandler(nil)) registerInterfaceProvider(guildUpdateEventHandler(nil)) registerInterfaceProvider(interactionCreateEventHandler(nil)) registerInterfaceProvider(inviteCreateEventHandler(nil)) diff --git a/vendor/github.com/bwmarrin/discordgo/events.go b/vendor/github.com/bwmarrin/discordgo/events.go index cc5d7116..c90aede1 100644 --- a/vendor/github.com/bwmarrin/discordgo/events.go +++ b/vendor/github.com/bwmarrin/discordgo/events.go @@ -191,7 +191,9 @@ type GuildMembersChunk struct { Members []*Member `json:"members"` ChunkIndex int `json:"chunk_index"` ChunkCount int `json:"chunk_count"` + NotFound []string `json:"not_found,omitempty"` Presences []*Presence `json:"presences,omitempty"` + Nonce string `json:"nonce,omitempty"` } // GuildIntegrationsUpdate is the data for a GuildIntegrationsUpdate event. @@ -199,6 +201,21 @@ type GuildIntegrationsUpdate struct { GuildID string `json:"guild_id"` } +// StageInstanceEventCreate is the data for a StageInstanceEventCreate event. +type StageInstanceEventCreate struct { + *StageInstance +} + +// StageInstanceEventUpdate is the data for a StageInstanceEventUpdate event. +type StageInstanceEventUpdate struct { + *StageInstance +} + +// StageInstanceEventDelete is the data for a StageInstanceEventDelete event. +type StageInstanceEventDelete struct { + *StageInstance +} + // GuildScheduledEventCreate is the data for a GuildScheduledEventCreate event. type GuildScheduledEventCreate struct { *GuildScheduledEvent @@ -214,6 +231,20 @@ type GuildScheduledEventDelete struct { *GuildScheduledEvent } +// GuildScheduledEventUserAdd is the data for a GuildScheduledEventUserAdd event. +type GuildScheduledEventUserAdd struct { + GuildScheduledEventID string `json:"guild_scheduled_event_id"` + UserID string `json:"user_id"` + GuildID string `json:"guild_id"` +} + +// GuildScheduledEventUserRemove is the data for a GuildScheduledEventUserRemove event. +type GuildScheduledEventUserRemove struct { + GuildScheduledEventID string `json:"guild_scheduled_event_id"` + UserID string `json:"user_id"` + GuildID string `json:"guild_id"` +} + // MessageAck is the data for a MessageAck event. type MessageAck struct { MessageID string `json:"message_id"` diff --git a/vendor/github.com/bwmarrin/discordgo/interactions.go b/vendor/github.com/bwmarrin/discordgo/interactions.go index 0e5ae3c4..7164f65e 100644 --- a/vendor/github.com/bwmarrin/discordgo/interactions.go +++ b/vendor/github.com/bwmarrin/discordgo/interactions.go @@ -35,12 +35,14 @@ type ApplicationCommand struct { Version string `json:"version,omitempty"` Type ApplicationCommandType `json:"type,omitempty"` Name string `json:"name"` + NameLocalizations *map[Locale]string `json:"name_localizations,omitempty"` DefaultPermission *bool `json:"default_permission,omitempty"` // NOTE: Chat commands only. Otherwise it mustn't be set. 
- Description string `json:"description,omitempty"` - Options []*ApplicationCommandOption `json:"options"` + Description string `json:"description,omitempty"` + DescriptionLocalizations *map[Locale]string `json:"description_localizations,omitempty"` + Options []*ApplicationCommandOption `json:"options"` } // ApplicationCommandOptionType indicates the type of a slash command's option. @@ -91,9 +93,11 @@ func (t ApplicationCommandOptionType) String() string { // ApplicationCommandOption represents an option/subcommand/subcommands group. type ApplicationCommandOption struct { - Type ApplicationCommandOptionType `json:"type"` - Name string `json:"name"` - Description string `json:"description,omitempty"` + Type ApplicationCommandOptionType `json:"type"` + Name string `json:"name"` + NameLocalizations map[Locale]string `json:"name_localizations,omitempty"` + Description string `json:"description,omitempty"` + DescriptionLocalizations map[Locale]string `json:"description_localizations,omitempty"` // NOTE: This feature was on the API, but at some point developers decided to remove it. // So I commented it, until it will be officially on the docs. // Default bool `json:"default"` @@ -113,8 +117,9 @@ type ApplicationCommandOption struct { // ApplicationCommandOptionChoice represents a slash command option choice. type ApplicationCommandOptionChoice struct { - Name string `json:"name"` - Value interface{} `json:"value"` + Name string `json:"name"` + NameLocalizations map[Locale]string `json:"name_localizations,omitempty"` + Value interface{} `json:"value"` } // ApplicationCommandPermissions represents a single user or role permission for a command. @@ -175,6 +180,7 @@ func (t InteractionType) String() string { // Interaction represents data of an interaction. type Interaction struct { ID string `json:"id"` + AppID string `json:"application_id"` Type InteractionType `json:"type"` Data InteractionData `json:"data"` GuildID string `json:"guild_id"` @@ -509,7 +515,7 @@ type InteractionResponseData struct { TTS bool `json:"tts"` Content string `json:"content"` Components []MessageComponent `json:"components"` - Embeds []*MessageEmbed `json:"embeds,omitempty"` + Embeds []*MessageEmbed `json:"embeds"` AllowedMentions *MessageAllowedMentions `json:"allowed_mentions,omitempty"` Flags uint64 `json:"flags,omitempty"` Files []*File `json:"-"` diff --git a/vendor/github.com/bwmarrin/discordgo/message.go b/vendor/github.com/bwmarrin/discordgo/message.go index eb2f4962..22d5f740 100644 --- a/vendor/github.com/bwmarrin/discordgo/message.go +++ b/vendor/github.com/bwmarrin/discordgo/message.go @@ -199,7 +199,9 @@ const ( MessageFlagsCrossPosted MessageFlags = 1 << 0 // MessageFlagsIsCrossPosted this message originated from a message in another channel (via Channel Following). MessageFlagsIsCrossPosted MessageFlags = 1 << 1 - // MessageFlagsSupressEmbeds do not include any embeds when serializing this message. + // MessageFlagsSuppressEmbeds do not include any embeds when serializing this message. + MessageFlagsSuppressEmbeds MessageFlags = 1 << 2 + // TODO: deprecated, remove when compatibility is not needed MessageFlagsSupressEmbeds MessageFlags = 1 << 2 // MessageFlagsSourceMessageDeleted the source message for this crosspost has been deleted (via Channel Following). MessageFlagsSourceMessageDeleted MessageFlags = 1 << 3 @@ -225,7 +227,7 @@ type File struct { // MessageSend stores all parameters you can send with ChannelMessageSendComplex. 
type MessageSend struct { Content string `json:"content,omitempty"` - Embeds []*MessageEmbed `json:"embeds,omitempty"` + Embeds []*MessageEmbed `json:"embeds"` TTS bool `json:"tts"` Components []MessageComponent `json:"components"` Files []*File `json:"-"` @@ -244,8 +246,9 @@ type MessageSend struct { type MessageEdit struct { Content *string `json:"content,omitempty"` Components []MessageComponent `json:"components"` - Embeds []*MessageEmbed `json:"embeds,omitempty"` + Embeds []*MessageEmbed `json:"embeds"` AllowedMentions *MessageAllowedMentions `json:"allowed_mentions,omitempty"` + Flags MessageFlags `json:"flags,omitempty"` ID string Channel string @@ -342,7 +345,7 @@ type MessageEmbedFooter struct { // MessageEmbedImage is a part of a MessageEmbed struct. type MessageEmbedImage struct { - URL string `json:"url,omitempty"` + URL string `json:"url"` ProxyURL string `json:"proxy_url,omitempty"` Width int `json:"width,omitempty"` Height int `json:"height,omitempty"` @@ -350,7 +353,7 @@ type MessageEmbedImage struct { // MessageEmbedThumbnail is a part of a MessageEmbed struct. type MessageEmbedThumbnail struct { - URL string `json:"url,omitempty"` + URL string `json:"url"` ProxyURL string `json:"proxy_url,omitempty"` Width int `json:"width,omitempty"` Height int `json:"height,omitempty"` @@ -372,7 +375,7 @@ type MessageEmbedProvider struct { // MessageEmbedAuthor is a part of a MessageEmbed struct. type MessageEmbedAuthor struct { URL string `json:"url,omitempty"` - Name string `json:"name,omitempty"` + Name string `json:"name"` IconURL string `json:"icon_url,omitempty"` ProxyIconURL string `json:"proxy_icon_url,omitempty"` } diff --git a/vendor/github.com/bwmarrin/discordgo/restapi.go b/vendor/github.com/bwmarrin/discordgo/restapi.go index 41796fe2..bb21ef21 100644 --- a/vendor/github.com/bwmarrin/discordgo/restapi.go +++ b/vendor/github.com/bwmarrin/discordgo/restapi.go @@ -39,6 +39,59 @@ var ( ErrUnauthorized = errors.New("HTTP request was unauthorized. This could be because the provided token was not a bot token. Please add \"Bot \" to the start of your token. https://discord.com/developers/docs/reference#authentication-example-bot-token-authorization-header") ) +var ( + // Marshal defines function used to encode JSON payloads + Marshal func(v interface{}) ([]byte, error) = json.Marshal + // Unmarshal defines function used to decode JSON payloads + Unmarshal func(src []byte, v interface{}) error = json.Unmarshal +) + +// RESTError stores error information about a request with a bad response code. +// Message is not always present, there are cases where api calls can fail +// without returning a json message. +type RESTError struct { + Request *http.Request + Response *http.Response + ResponseBody []byte + + Message *APIErrorMessage // Message may be nil. +} + +// newRestError returns a new REST API error. +func newRestError(req *http.Request, resp *http.Response, body []byte) *RESTError { + restErr := &RESTError{ + Request: req, + Response: resp, + ResponseBody: body, + } + + // Attempt to decode the error and assume no message was provided if it fails + var msg *APIErrorMessage + err := Unmarshal(body, &msg) + if err == nil { + restErr.Message = msg + } + + return restErr +} + +// Error returns a Rest API Error with its status code and body. +func (r RESTError) Error() string { + return "HTTP " + r.Response.Status + ", " + string(r.ResponseBody) +} + +// RateLimitError is returned when a request exceeds a rate limit +// and ShouldRetryOnRateLimit is false. 
The request may be manually +// retried after waiting the duration specified by RetryAfter. +type RateLimitError struct { + *RateLimit +} + +// Error returns a rate limit error with rate limited endpoint and retry time. +func (e RateLimitError) Error() string { + return "Rate limit exceeded on " + e.URL + ", retry after " + e.RetryAfter.String() +} + // Request is the same as RequestWithBucketID but the bucket id is the same as the urlStr func (s *Session) Request(method, urlStr string, data interface{}) (response []byte, err error) { return s.RequestWithBucketID(method, urlStr, data, strings.SplitN(urlStr, "?", 2)[0]) @@ -48,7 +101,7 @@ func (s *Session) Request(method, urlStr string, data interface{}) (response []b func (s *Session) RequestWithBucketID(method, urlStr string, data interface{}, bucketID string) (response []byte, err error) { var body []byte if data != nil { - body, err = json.Marshal(data) + body, err = Marshal(data) if err != nil { return } @@ -108,7 +161,7 @@ func (s *Session) RequestWithLockedBucket(method, urlStr, contentType string, b } defer func() { err2 := resp.Body.Close() - if err2 != nil { + if s.Debug && err2 != nil { log.Println("error closing resp body") } }() @@ -147,19 +200,24 @@ func (s *Session) RequestWithLockedBucket(method, urlStr, contentType string, b } case 429: // TOO MANY REQUESTS - Rate limiting rl := TooManyRequests{} - err = json.Unmarshal(response, &rl) + err = Unmarshal(response, &rl) if err != nil { s.log(LogError, "rate limit unmarshal error, %s", err) return } - s.log(LogInformational, "Rate Limiting %s, retry in %v", urlStr, rl.RetryAfter) - s.handleEvent(rateLimitEventType, &RateLimit{TooManyRequests: &rl, URL: urlStr}) - time.Sleep(rl.RetryAfter) - // we can make the above smarter - // this method can cause longer delays than required + if s.ShouldRetryOnRateLimit { + s.log(LogInformational, "Rate Limiting %s, retry in %v", urlStr, rl.RetryAfter) + s.handleEvent(rateLimitEventType, &RateLimit{TooManyRequests: &rl, URL: urlStr}) + + time.Sleep(rl.RetryAfter) + // we can make the above smarter + // this method can cause longer delays than required - response, err = s.RequestWithLockedBucket(method, urlStr, contentType, b, s.Ratelimiter.LockBucketObject(bucket), sequence) + response, err = s.RequestWithLockedBucket(method, urlStr, contentType, b, s.Ratelimiter.LockBucketObject(bucket), sequence) + } else { + err = &RateLimitError{&RateLimit{TooManyRequests: &rl, URL: urlStr}} + } case http.StatusUnauthorized: if strings.Index(s.Token, "Bot ") != 0 { s.log(LogInformational, ErrUnauthorized.Error()) @@ -174,7 +232,7 @@ func (s *Session) RequestWithLockedBucket(method, urlStr, contentType string, b } func unmarshal(data []byte, v interface{}) error { - err := json.Unmarshal(data, v) + err := Unmarshal(data, v) if err != nil { return fmt.Errorf("%w: %s", ErrJSONUnmarshal, err) } @@ -438,6 +496,19 @@ func (s *Session) Guild(guildID string) (st *Guild, err error) { return } +// GuildWithCounts returns a Guild structure of a specific Guild with approximate member and presence counts. +// guildID : The ID of a Guild +func (s *Session) GuildWithCounts(guildID string) (st *Guild, err error) { + + body, err := s.RequestWithBucketID("GET", EndpointGuild(guildID)+"?with_counts=true", nil, EndpointGuild(guildID)) + if err != nil { + return + } + + err = unmarshal(body, &st) + return +} + // GuildPreview returns a GuildPreview structure of a specific public Guild. 
// guildID : The ID of a Guild func (s *Session) GuildPreview(guildID string) (st *GuildPreview, err error) { @@ -481,7 +552,7 @@ func (s *Session) GuildEdit(guildID string, g GuildParams) (st *Guild, err error } } - //Bounds checking for regions + // Bounds checking for regions if g.Region != "" { isValid := false regions, _ := s.VoiceRegions() @@ -530,12 +601,30 @@ func (s *Session) GuildLeave(guildID string) (err error) { return } -// GuildBans returns an array of GuildBan structures for all bans of a -// given guild. -// guildID : The ID of a Guild. -func (s *Session) GuildBans(guildID string) (st []*GuildBan, err error) { +// GuildBans returns an array of GuildBan structures for bans in the given guild. +// guildID : The ID of a Guild +// limit : Max number of bans to return (max 1000) +// beforeID : If not empty all returned users will be after the given id +// afterID : If not empty all returned users will be before the given id +func (s *Session) GuildBans(guildID string, limit int, beforeID, afterID string) (st []*GuildBan, err error) { + uri := EndpointGuildBans(guildID) - body, err := s.RequestWithBucketID("GET", EndpointGuildBans(guildID), nil, EndpointGuildBans(guildID)) + v := url.Values{} + if limit != 0 { + v.Set("limit", strconv.Itoa(limit)) + } + if beforeID != "" { + v.Set("before", beforeID) + } + if afterID != "" { + v.Set("after", afterID) + } + + if len(v) > 0 { + uri += "?" + v.Encode() + } + + body, err := s.RequestWithBucketID("GET", uri, nil, EndpointGuildBans(guildID)) if err != nil { return } @@ -631,6 +720,29 @@ func (s *Session) GuildMembers(guildID string, after string, limit int) (st []*M return } +// GuildMembersSearch returns a list of guild member objects whose username or nickname starts with a provided string +// guildID : The ID of a Guild +// query : Query string to match username(s) and nickname(s) against +// limit : Max number of members to return (default 1, min 1, max 1000) +func (s *Session) GuildMembersSearch(guildID, query string, limit int) (st []*Member, err error) { + + uri := EndpointGuildMembersSearch(guildID) + + queryParams := url.Values{} + queryParams.Set("query", query) + if limit > 1 { + queryParams.Set("limit", strconv.Itoa(limit)) + } + + body, err := s.RequestWithBucketID("GET", uri+"?"+queryParams.Encode(), nil, uri) + if err != nil { + return + } + + err = unmarshal(body, &st) + return +} + // GuildMember returns a member of a guild. // guildID : The ID of a Guild. // userID : The ID of a User @@ -710,6 +822,21 @@ func (s *Session) GuildMemberEdit(guildID, userID string, roles []string) (err e return } +// GuildMemberEditComplex edits the nickname and roles of a member. +// guildID : The ID of a Guild. +// userID : The ID of a User. +// data : A GuildMemberEditData struct with the new nickname and roles +func (s *Session) GuildMemberEditComplex(guildID, userID string, data GuildMemberParams) (st *Member, err error) { + var body []byte + body, err = s.RequestWithBucketID("PATCH", EndpointGuildMember(guildID, userID), data, EndpointGuildMember(guildID, "")) + if err != nil { + return nil, err + } + + err = unmarshal(body, &st) + return +} + // GuildMemberMove moves a guild member from one voice channel to another/none // guildID : The ID of a Guild. // userID : The ID of a User. @@ -1218,6 +1345,20 @@ func (s *Session) GuildEmojis(guildID string) (emoji []*Emoji, err error) { return } +// GuildEmoji returns specified emoji. 
+// guildID : The ID of a Guild +// emojiID : The ID of an Emoji to retrieve +func (s *Session) GuildEmoji(guildID, emojiID string) (emoji *Emoji, err error) { + var body []byte + body, err = s.RequestWithBucketID("GET", EndpointGuildEmoji(guildID, emojiID), nil, EndpointGuildEmoji(guildID, emojiID)) + if err != nil { + return + } + + err = unmarshal(body, &emoji) + return +} + // GuildEmojiCreate creates a new emoji // guildID : The ID of a Guild. // name : The Name of the Emoji. @@ -1244,12 +1385,12 @@ func (s *Session) GuildEmojiCreate(guildID, name, image string, roles []string) // guildID : The ID of a Guild. // emojiID : The ID of an Emoji. // name : The Name of the Emoji. -// roles : The roles for which this emoji will be whitelisted, can be nil. +// roles : The roles for which this emoji will be whitelisted, if nil or empty the roles will be reset. func (s *Session) GuildEmojiEdit(guildID, emojiID, name string, roles []string) (emoji *Emoji, err error) { data := struct { Name string `json:"name"` - Roles []string `json:"roles,omitempty"` + Roles []string `json:"roles"` }{name, roles} body, err := s.RequestWithBucketID("PATCH", EndpointGuildEmoji(guildID, emojiID), data, EndpointGuildEmojis(guildID)) @@ -1851,6 +1992,37 @@ func (s *Session) InviteWithCounts(inviteID string) (st *Invite, err error) { return } +// InviteComplex returns an Invite structure of the given invite including specified fields. +// inviteID : The invite code +// guildScheduledEventID : If specified, includes specified guild scheduled event. +// withCounts : Whether to include approximate member counts or not +// withExpiration : Whether to include expiration time or not +func (s *Session) InviteComplex(inviteID, guildScheduledEventID string, withCounts, withExpiration bool) (st *Invite, err error) { + endpoint := EndpointInvite(inviteID) + v := url.Values{} + if guildScheduledEventID != "" { + v.Set("guild_scheduled_event_id", guildScheduledEventID) + } + if withCounts { + v.Set("with_counts", "true") + } + if withExpiration { + v.Set("with_expiration", "true") + } + + if len(v) != 0 { + endpoint += "?" + v.Encode() + } + + body, err := s.RequestWithBucketID("GET", endpoint, nil, EndpointInvite("")) + if err != nil { + return + } + + err = unmarshal(body, &st) + return +} + // InviteDelete deletes an existing invite // inviteID : the code of an invite func (s *Session) InviteDelete(inviteID string) (st *Invite, err error) { @@ -2158,7 +2330,7 @@ func (s *Session) WebhookMessage(webhookID, token, messageID string) (message *M return } - err = json.Unmarshal(body, &message) + err = Unmarshal(body, &message) return } @@ -2207,7 +2379,7 @@ func (s *Session) WebhookMessageDelete(webhookID, token, messageID string) (err // MessageReactionAdd creates an emoji reaction to a message. // channelID : The channel ID. // messageID : The message ID. -// emojiID : Either the unicode emoji for the reaction, or a guild emoji identifier. +// emojiID : Either the unicode emoji for the reaction, or a guild emoji identifier in name:id format (e.g. "hello:1234567654321") func (s *Session) MessageReactionAdd(channelID, messageID, emojiID string) error { // emoji such as #⃣ need to have # escaped @@ -2687,10 +2859,9 @@ func (s *Session) ApplicationCommandPermissionsBatchEdit(appID, guildID string, } // InteractionRespond creates the response to an interaction. -// appID : The application ID. // interaction : Interaction instance. // resp : Response message data. 
-func (s *Session) InteractionRespond(interaction *Interaction, resp *InteractionResponse) (err error) { +func (s *Session) InteractionRespond(interaction *Interaction, resp *InteractionResponse) error { endpoint := EndpointInteractionResponse(interaction.ID, interaction.Token) if resp.Data != nil && len(resp.Data.Files) > 0 { @@ -2700,32 +2871,30 @@ func (s *Session) InteractionRespond(interaction *Interaction, resp *Interaction } _, err = s.request("POST", endpoint, contentType, body, endpoint, 0) - } else { - _, err = s.RequestWithBucketID("POST", endpoint, *resp, endpoint) + return err } + + _, err := s.RequestWithBucketID("POST", endpoint, *resp, endpoint) return err } // InteractionResponse gets the response to an interaction. -// appID : The application ID. // interaction : Interaction instance. -func (s *Session) InteractionResponse(appID string, interaction *Interaction) (*Message, error) { - return s.WebhookMessage(appID, interaction.Token, "@original") +func (s *Session) InteractionResponse(interaction *Interaction) (*Message, error) { + return s.WebhookMessage(interaction.AppID, interaction.Token, "@original") } // InteractionResponseEdit edits the response to an interaction. -// appID : The application ID. // interaction : Interaction instance. // newresp : Updated response message data. -func (s *Session) InteractionResponseEdit(appID string, interaction *Interaction, newresp *WebhookEdit) (*Message, error) { - return s.WebhookMessageEdit(appID, interaction.Token, "@original", newresp) +func (s *Session) InteractionResponseEdit(interaction *Interaction, newresp *WebhookEdit) (*Message, error) { + return s.WebhookMessageEdit(interaction.AppID, interaction.Token, "@original", newresp) } // InteractionResponseDelete deletes the response to an interaction. -// appID : The application ID. // interaction : Interaction instance. -func (s *Session) InteractionResponseDelete(appID string, interaction *Interaction) error { - endpoint := EndpointInteractionResponseActions(appID, interaction.Token) +func (s *Session) InteractionResponseDelete(interaction *Interaction) error { + endpoint := EndpointInteractionResponseActions(interaction.AppID, interaction.Token) _, err := s.RequestWithBucketID("DELETE", endpoint, nil, endpoint) @@ -2733,29 +2902,76 @@ func (s *Session) InteractionResponseDelete(appID string, interaction *Interacti } // FollowupMessageCreate creates the followup message for an interaction. -// appID : The application ID. // interaction : Interaction instance. // wait : Waits for server confirmation of message send and ensures that the return struct is populated (it is nil otherwise) // data : Data of the message to send. -func (s *Session) FollowupMessageCreate(appID string, interaction *Interaction, wait bool, data *WebhookParams) (*Message, error) { - return s.WebhookExecute(appID, interaction.Token, wait, data) +func (s *Session) FollowupMessageCreate(interaction *Interaction, wait bool, data *WebhookParams) (*Message, error) { + return s.WebhookExecute(interaction.AppID, interaction.Token, wait, data) } // FollowupMessageEdit edits a followup message of an interaction. -// appID : The application ID. // interaction : Interaction instance. // messageID : The followup message ID. 
// data : Data to update the message -func (s *Session) FollowupMessageEdit(appID string, interaction *Interaction, messageID string, data *WebhookEdit) (*Message, error) { - return s.WebhookMessageEdit(appID, interaction.Token, messageID, data) +func (s *Session) FollowupMessageEdit(interaction *Interaction, messageID string, data *WebhookEdit) (*Message, error) { + return s.WebhookMessageEdit(interaction.AppID, interaction.Token, messageID, data) } // FollowupMessageDelete deletes a followup message of an interaction. -// appID : The application ID. // interaction : Interaction instance. // messageID : The followup message ID. -func (s *Session) FollowupMessageDelete(appID string, interaction *Interaction, messageID string) error { - return s.WebhookMessageDelete(appID, interaction.Token, messageID) +func (s *Session) FollowupMessageDelete(interaction *Interaction, messageID string) error { + return s.WebhookMessageDelete(interaction.AppID, interaction.Token, messageID) +} + +// ------------------------------------------------------------------------------------------------ +// Functions specific to stage instances +// ------------------------------------------------------------------------------------------------ + +// StageInstanceCreate creates and returns a new Stage instance associated to a Stage channel. +// data : Parameters needed to create a stage instance. +// data : The data of the Stage instance to create +func (s *Session) StageInstanceCreate(data *StageInstanceParams) (si *StageInstance, err error) { + body, err := s.RequestWithBucketID("POST", EndpointStageInstances, data, EndpointStageInstances) + if err != nil { + return + } + + err = unmarshal(body, &si) + return +} + +// StageInstance will retrieve a Stage instance by ID of the Stage channel. +// channelID : The ID of the Stage channel +func (s *Session) StageInstance(channelID string) (si *StageInstance, err error) { + body, err := s.RequestWithBucketID("GET", EndpointStageInstance(channelID), nil, EndpointStageInstance(channelID)) + if err != nil { + return + } + + err = unmarshal(body, &si) + return +} + +// StageInstanceEdit will edit a Stage instance by ID of the Stage channel. +// channelID : The ID of the Stage channel +// data : The data to edit the Stage instance +func (s *Session) StageInstanceEdit(channelID string, data *StageInstanceParams) (si *StageInstance, err error) { + + body, err := s.RequestWithBucketID("PATCH", EndpointStageInstance(channelID), data, EndpointStageInstance(channelID)) + if err != nil { + return + } + + err = unmarshal(body, &si) + return +} + +// StageInstanceDelete will delete a Stage instance by ID of the Stage channel. +// channelID : The ID of the Stage channel +func (s *Session) StageInstanceDelete(channelID string) (err error) { + _, err = s.RequestWithBucketID("DELETE", EndpointStageInstance(channelID), nil, EndpointStageInstance(channelID)) + return } // ------------------------------------------------------------------------------------------------ diff --git a/vendor/github.com/bwmarrin/discordgo/state.go b/vendor/github.com/bwmarrin/discordgo/state.go index e75be895..a25d55f5 100644 --- a/vendor/github.com/bwmarrin/discordgo/state.go +++ b/vendor/github.com/bwmarrin/discordgo/state.go @@ -979,8 +979,9 @@ func (s *State) OnInterface(se *Session, i interface{}) (err error) { err = s.GuildRemove(t.Guild) case *GuildMemberAdd: + var guild *Guild // Updates the MemberCount of the guild. 
- guild, err := s.Guild(t.Member.GuildID) + guild, err = s.Guild(t.Member.GuildID) if err != nil { return err } @@ -995,8 +996,9 @@ func (s *State) OnInterface(se *Session, i interface{}) (err error) { err = s.MemberAdd(t.Member) } case *GuildMemberRemove: + var guild *Guild // Updates the MemberCount of the guild. - guild, err := s.Guild(t.Member.GuildID) + guild, err = s.Guild(t.Member.GuildID) if err != nil { return err } diff --git a/vendor/github.com/bwmarrin/discordgo/structs.go b/vendor/github.com/bwmarrin/discordgo/structs.go index 3a92c9fd..a1e0232c 100644 --- a/vendor/github.com/bwmarrin/discordgo/structs.go +++ b/vendor/github.com/bwmarrin/discordgo/structs.go @@ -43,6 +43,9 @@ type Session struct { // Should the session reconnect the websocket on errors. ShouldReconnectOnError bool + // Should the session retry requests when rate limited. + ShouldRetryOnRateLimit bool + // Identify is sent during initial handshake with the discord gateway. // https://discord.com/developers/docs/topics/gateway#identify Identify Identify @@ -260,6 +263,7 @@ const ( ChannelTypeGuildNewsThread ChannelType = 10 ChannelTypeGuildPublicThread ChannelType = 11 ChannelTypeGuildPrivateThread ChannelType = 12 + ChannelTypeGuildStageVoice ChannelType = 13 ) // A Channel holds all data related to an individual Discord channel. @@ -360,7 +364,7 @@ type ChannelEdit struct { UserLimit int `json:"user_limit,omitempty"` PermissionOverwrites []*PermissionOverwrite `json:"permission_overwrites,omitempty"` ParentID string `json:"parent_id,omitempty"` - RateLimitPerUser int `json:"rate_limit_per_user,omitempty"` + RateLimitPerUser *int `json:"rate_limit_per_user,omitempty"` // NOTE: threads only @@ -552,6 +556,17 @@ const ( ExplicitContentFilterAllMembers ExplicitContentFilterLevel = 2 ) +// GuildNSFWLevel type definition +type GuildNSFWLevel int + +// Constants for GuildNSFWLevel levels from 0 to 3 inclusive +const ( + GuildNSFWLevelDefault GuildNSFWLevel = 0 + GuildNSFWLevelExplicit GuildNSFWLevel = 1 + GuildNSFWLevelSafe GuildNSFWLevel = 2 + GuildNSFWLevelAgeRestricted GuildNSFWLevel = 3 +) + // MfaLevel type definition type MfaLevel int @@ -675,6 +690,9 @@ type Guild struct { // The explicit content filter level ExplicitContentFilter ExplicitContentFilterLevel `json:"explicit_content_filter"` + // The NSFW Level of the guild + NSFWLevel GuildNSFWLevel `json:"nsfw_level"` + // The list of enabled guild features Features []string `json:"features"` @@ -731,6 +749,9 @@ type Guild struct { // Permissions of our user Permissions int64 `json:"permissions,string"` + + // Stage instances in the guild + StageInstances []*StageInstance `json:"stage_instances"` } // A GuildPreview holds data related to a specific public Discord Guild, even if the user is not in the guild. 
@@ -757,16 +778,31 @@ type GuildPreview struct { // The list of enabled guild features Features []string `json:"features"` - // Approximate number of members in this guild, returned from the GET /guild/<id> endpoint when with_counts is true + // Approximate number of members in this guild + // NOTE: this field is only filled when using GuildWithCounts ApproximateMemberCount int `json:"approximate_member_count"` - // Approximate number of non-offline members in this guild, returned from the GET /guild/<id> endpoint when with_counts is true + // Approximate number of non-offline members in this guild + // NOTE: this field is only filled when using GuildWithCounts ApproximatePresenceCount int `json:"approximate_presence_count"` // the description for the guild Description string `json:"description"` } +// IconURL returns a URL to the guild's icon. +func (g *GuildPreview) IconURL() string { + if g.Icon == "" { + return "" + } + + if strings.HasPrefix(g.Icon, "a_") { + return EndpointGuildIconAnimated(g.ID, g.Icon) + } + + return EndpointGuildIcon(g.ID, g.Icon) +} + // GuildScheduledEvent is a representation of a scheduled event in a guild. Only for retrieval of the data. // https://discord.com/developers/docs/resources/guild-scheduled-event#guild-scheduled-event type GuildScheduledEvent struct { @@ -842,7 +878,7 @@ func (p GuildScheduledEventParams) MarshalJSON() ([]byte, error) { type guildScheduledEventParams GuildScheduledEventParams if p.EntityType == GuildScheduledEventEntityTypeExternal && p.ChannelID == "" { - return json.Marshal(struct { + return Marshal(struct { guildScheduledEventParams ChannelID json.RawMessage `json:"channel_id"` }{ @@ -851,7 +887,7 @@ func (p GuildScheduledEventParams) MarshalJSON() ([]byte, error) { }) } - return json.Marshal(guildScheduledEventParams(p)) + return Marshal(guildScheduledEventParams(p)) } // GuildScheduledEventEntityMetadata holds additional metadata for guild scheduled event. @@ -1093,7 +1129,7 @@ func (t *TimeStamps) UnmarshalJSON(b []byte) error { End float64 `json:"end,omitempty"` Start float64 `json:"start,omitempty"` }{} - err := json.Unmarshal(b, &temp) + err := Unmarshal(b, &temp) if err != nil { return err } @@ -1231,7 +1267,7 @@ func (t *TooManyRequests) UnmarshalJSON(b []byte) error { Message string `json:"message"` RetryAfter float64 `json:"retry_after"` }{} - err := json.Unmarshal(b, &u) + err := Unmarshal(b, &u) if err != nil { return err } @@ -1566,6 +1602,15 @@ type UserGuildSettingsEdit struct { ChannelOverrides map[string]*UserGuildSettingsChannelOverride `json:"channel_overrides"` } +// GuildMemberParams stores data needed to update a member +// https://discord.com/developers/docs/resources/guild#modify-guild-member +type GuildMemberParams struct { + // Value to set user's nickname to + Nick string `json:"nick,omitempty"` + // Array of role ids the member is assigned + Roles *[]string `json:"roles,omitempty"` +} + // An APIErrorMessage is an api error message returned from discord type APIErrorMessage struct { Code int `json:"code"` @@ -1642,7 +1687,7 @@ func (activity *Activity) UnmarshalJSON(b []byte) error { Instance bool `json:"instance,omitempty"` Flags int `json:"flags,omitempty"` }{} - err := json.Unmarshal(b, &temp) + err := Unmarshal(b, &temp) if err != nil { return err } @@ -1695,14 +1740,13 @@ const ( // Identify is sent during initial handshake with the discord gateway. 
// https://discord.com/developers/docs/topics/gateway#identify type Identify struct { - Token string `json:"token"` - Properties IdentifyProperties `json:"properties"` - Compress bool `json:"compress"` - LargeThreshold int `json:"large_threshold"` - Shard *[2]int `json:"shard,omitempty"` - Presence GatewayStatusUpdate `json:"presence,omitempty"` - GuildSubscriptions bool `json:"guild_subscriptions"` - Intents Intent `json:"intents"` + Token string `json:"token"` + Properties IdentifyProperties `json:"properties"` + Compress bool `json:"compress"` + LargeThreshold int `json:"large_threshold"` + Shard *[2]int `json:"shard,omitempty"` + Presence GatewayStatusUpdate `json:"presence,omitempty"` + Intents Intent `json:"intents"` } // IdentifyProperties contains the "properties" portion of an Identify packet @@ -1715,6 +1759,49 @@ type IdentifyProperties struct { ReferringDomain string `json:"$referring_domain"` } +// StageInstance holds information about a live stage. +// https://discord.com/developers/docs/resources/stage-instance#stage-instance-resource +type StageInstance struct { + // The id of this Stage instance + ID string `json:"id"` + // The guild id of the associated Stage channel + GuildID string `json:"guild_id"` + // The id of the associated Stage channel + ChannelID string `json:"channel_id"` + // The topic of the Stage instance (1-120 characters) + Topic string `json:"topic"` + // The privacy level of the Stage instance + // https://discord.com/developers/docs/resources/stage-instance#stage-instance-object-privacy-level + PrivacyLevel StageInstancePrivacyLevel `json:"privacy_level"` + // Whether or not Stage Discovery is disabled (deprecated) + DiscoverableDisabled bool `json:"discoverable_disabled"` + // The id of the scheduled event for this Stage instance + GuildScheduledEventID string `json:"guild_scheduled_event_id"` +} + +// StageInstanceParams represents the parameters needed to create or edit a stage instance +type StageInstanceParams struct { + // ChannelID represents the id of the Stage channel + ChannelID string `json:"channel_id,omitempty"` + // Topic of the Stage instance (1-120 characters) + Topic string `json:"topic,omitempty"` + // PrivacyLevel of the Stage instance (default GUILD_ONLY) + PrivacyLevel StageInstancePrivacyLevel `json:"privacy_level,omitempty"` + // SendStartNotification will notify @everyone that a Stage instance has started + SendStartNotification bool `json:"send_start_notification,omitempty"` +} + +// StageInstancePrivacyLevel represents the privacy level of a Stage instance +// https://discord.com/developers/docs/resources/stage-instance#stage-instance-object-privacy-level +type StageInstancePrivacyLevel int + +const ( + // StageInstancePrivacyLevelPublic The Stage instance is visible publicly. (deprecated) + StageInstancePrivacyLevelPublic StageInstancePrivacyLevel = 1 + // StageInstancePrivacyLevelGuildOnly The Stage instance is visible to only guild members. 
+ StageInstancePrivacyLevelGuildOnly StageInstancePrivacyLevel = 2 +) + // Constants for the different bit offsets of text channel permissions const ( // Deprecated: PermissionReadMessages has been replaced with PermissionViewChannel for text and voice channels @@ -1731,6 +1818,7 @@ const ( PermissionManageThreads = 0x0000000400000000 PermissionCreatePublicThreads = 0x0000000800000000 PermissionCreatePrivateThreads = 0x0000001000000000 + PermissionUseExternalStickers = 0x0000002000000000 PermissionSendMessagesInThreads = 0x0000004000000000 ) @@ -1745,6 +1833,7 @@ const ( PermissionVoiceMoveMembers = 0x0000000001000000 PermissionVoiceUseVAD = 0x0000000002000000 PermissionVoiceRequestToSpeak = 0x0000000100000000 + PermissionUseActivities = 0x0000008000000000 ) // Constants for general management. @@ -1754,6 +1843,7 @@ const ( PermissionManageRoles = 0x0000000010000000 PermissionManageWebhooks = 0x0000000020000000 PermissionManageEmojis = 0x0000000040000000 + PermissionManageEvents = 0x0000000200000000 ) // Constants for the different bit offsets of general permissions diff --git a/vendor/github.com/bwmarrin/discordgo/types.go b/vendor/github.com/bwmarrin/discordgo/types.go deleted file mode 100644 index 7f969aef..00000000 --- a/vendor/github.com/bwmarrin/discordgo/types.go +++ /dev/null @@ -1,47 +0,0 @@ -// Discordgo - Discord bindings for Go -// Available at https://github.com/bwmarrin/discordgo - -// Copyright 2015-2016 Bruce Marriner <bruce@sqls.net>. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file contains custom types, currently only a timestamp wrapper. - -package discordgo - -import ( - "encoding/json" - "net/http" -) - -// RESTError stores error information about a request with a bad response code. -// Message is not always present, there are cases where api calls can fail -// without returning a json message. -type RESTError struct { - Request *http.Request - Response *http.Response - ResponseBody []byte - - Message *APIErrorMessage // Message may be nil. 
-} - -func newRestError(req *http.Request, resp *http.Response, body []byte) *RESTError { - restErr := &RESTError{ - Request: req, - Response: resp, - ResponseBody: body, - } - - // Attempt to decode the error and assume no message was provided if it fails - var msg *APIErrorMessage - err := json.Unmarshal(body, &msg) - if err == nil { - restErr.Message = msg - } - - return restErr -} - -func (r RESTError) Error() string { - return "HTTP " + r.Response.Status + ", " + string(r.ResponseBody) -} diff --git a/vendor/github.com/bwmarrin/discordgo/util.go b/vendor/github.com/bwmarrin/discordgo/util.go index 62313033..86f43b5f 100644 --- a/vendor/github.com/bwmarrin/discordgo/util.go +++ b/vendor/github.com/bwmarrin/discordgo/util.go @@ -2,7 +2,6 @@ package discordgo import ( "bytes" - "encoding/json" "fmt" "io" "mime/multipart" @@ -30,7 +29,7 @@ func MultipartBodyWithJSON(data interface{}, files []*File) (requestContentType body := &bytes.Buffer{} bodywriter := multipart.NewWriter(body) - payload, err := json.Marshal(data) + payload, err := Marshal(data) if err != nil { return } diff --git a/vendor/github.com/bwmarrin/discordgo/wsapi.go b/vendor/github.com/bwmarrin/discordgo/wsapi.go index f2c228d5..dad3fb98 100644 --- a/vendor/github.com/bwmarrin/discordgo/wsapi.go +++ b/vendor/github.com/bwmarrin/discordgo/wsapi.go @@ -409,10 +409,13 @@ func (s *Session) UpdateStatusComplex(usd UpdateStatusData) (err error) { } type requestGuildMembersData struct { - GuildIDs []string `json:"guild_id"` - Query string `json:"query"` - Limit int `json:"limit"` - Presences bool `json:"presences"` + // TODO: Deprecated. Use string instead of []string + GuildIDs []string `json:"guild_id"` + Query *string `json:"query,omitempty"` + UserIDs *[]string `json:"user_ids,omitempty"` + Limit int `json:"limit"` + Nonce string `json:"nonce,omitempty"` + Presences bool `json:"presences"` } type requestGuildMembersOp struct { @@ -425,29 +428,59 @@ type requestGuildMembersOp struct { // guildID : Single Guild ID to request members of // query : String that username starts with, leave empty to return all members // limit : Max number of items to return, or 0 to request all members matched +// nonce : Nonce to identify the Guild Members Chunk response // presences : Whether to request presences of guild members -func (s *Session) RequestGuildMembers(guildID string, query string, limit int, presences bool) (err error) { +func (s *Session) RequestGuildMembers(guildID, query string, limit int, nonce string, presences bool) error { + return s.RequestGuildMembersBatch([]string{guildID}, query, limit, nonce, presences) +} + +// RequestGuildMembersList requests guild members from the gateway +// The gateway responds with GuildMembersChunk events +// guildID : Single Guild ID to request members of +// userIDs : IDs of users to fetch +// limit : Max number of items to return, or 0 to request all members matched +// nonce : Nonce to identify the Guild Members Chunk response +// presences : Whether to request presences of guild members +func (s *Session) RequestGuildMembersList(guildID string, userIDs []string, limit int, nonce string, presences bool) error { + return s.RequestGuildMembersBatchList([]string{guildID}, userIDs, limit, nonce, presences) +} + +// RequestGuildMembersBatch requests guild members from the gateway +// The gateway responds with GuildMembersChunk events +// guildID : Slice of guild IDs to request members of +// query : String that username starts with, leave empty to return all members +// limit : Max number of 
items to return, or 0 to request all members matched +// nonce : Nonce to identify the Guild Members Chunk response +// presences : Whether to request presences of guild members +// +// NOTE: this function is deprecated, please use RequestGuildMembers instead +func (s *Session) RequestGuildMembersBatch(guildIDs []string, query string, limit int, nonce string, presences bool) (err error) { data := requestGuildMembersData{ - GuildIDs: []string{guildID}, - Query: query, + GuildIDs: guildIDs, + Query: &query, Limit: limit, + Nonce: nonce, Presences: presences, } err = s.requestGuildMembers(data) return } -// RequestGuildMembersBatch requests guild members from the gateway +// RequestGuildMembersBatchList requests guild members from the gateway // The gateway responds with GuildMembersChunk events // guildID : Slice of guild IDs to request members of -// query : String that username starts with, leave empty to return all members +// userIDs : IDs of users to fetch // limit : Max number of items to return, or 0 to request all members matched +// nonce : Nonce to identify the Guild Members Chunk response // presences : Whether to request presences of guild members -func (s *Session) RequestGuildMembersBatch(guildIDs []string, query string, limit int, presences bool) (err error) { +// +// NOTE: this function is deprecated, please use RequestGuildMembersList instead +func (s *Session) RequestGuildMembersBatchList(guildIDs []string, userIDs []string, limit int, nonce string, presences bool) (err error) { data := requestGuildMembersData{ GuildIDs: guildIDs, - Query: query, + UserIDs: &userIDs, Limit: limit, + Nonce: nonce, Presences: presences, } err = s.requestGuildMembers(data) diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index df57b1b2..34488e62 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -1,130 +1,40 @@ -# File system notifications for Go +# WARNING -[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) +If you are reading this, you use `master` branch of this repository, +which is wrong. -fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: +This branch + - should not be used; + - is not maintained; + - is not supported; + - will be removed soon. -```console -go get -u golang.org/x/sys/... -``` - -Cross platform: Windows, Linux, BSD and macOS. 
- -| Adapter | OS | Status | -| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| inotify | Linux 2.6.27 or later, Android\* | Supported | -| kqueue | BSD, macOS, iOS\* | Supported | -| ReadDirectoryChangesW | Windows | Supported | -| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | -| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | -| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | -| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | -| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | - -\* Android and iOS are untested. - -Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. - -## API stability - -fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). - -All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. - -Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. +You should switch to using the default branch instead. -## Usage +## Using git -```go -package main +Here's how to switch your existing local copy of this repository from `master` +to `main` (assuming the remote name is `origin`): -import ( - "log" - - "github.com/fsnotify/fsnotify" -) - -func main() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - done := make(chan bool) - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { - return - } - log.Println("event:", event) - if event.Op&fsnotify.Write == fsnotify.Write { - log.Println("modified file:", event.Name) - } - case err, ok := <-watcher.Errors: - if !ok { - return - } - log.Println("error:", err) - } - } - }() - - err = watcher.Add("/tmp/foo") - if err != nil { - log.Fatal(err) - } - <-done -} +``` +git branch -m master main +git fetch origin +git branch -u origin/main main +git remote set-head origin -a ``` -## Contributing - -Please refer to [CONTRIBUTING][] before opening an issue or pull request. - -## Example - -See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). - -## FAQ - -**When a file is moved to another directory is it still being watched?** - -No (it shouldn't be, unless you are watching where it was moved to). - -**When I watch a directory, are all subdirectories watched as well?** - -No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). - -**Do I have to watch the Error and Event channels in a separate goroutine?** - -As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) - -**Why am I receiving multiple events for the same file on OS X?** - -Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). 
A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). - -**How many files can be watched at once?** - -There are OS-specific limits as to how many watches can be created: -* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. -* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. - -**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** - -fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. - -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#18]: https://github.com/fsnotify/fsnotify/issues/18 -[#11]: https://github.com/fsnotify/fsnotify/issues/11 -[#7]: https://github.com/howeyc/fsnotify/issues/7 +In addition to the above, if you want to remove the leftover `origin/master` +remote branch (NOTE this also removes all other remote branches that no longer +exist in `origin`): -[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md +``` +git remote prune origin +``` -## Related Projects +## Background -* [notify](https://github.com/rjeczalik/notify) -* [fsevents](https://github.com/fsnotify/fsevents) +The `master` branch was renamed to `main`, causing an issue with +Yocto/OpenEmbedded's meta-virtualization layer, which explicitly refers +to `master` branch of this repository (see #426). +This temporary branch is created to alleviate the Yocto/OE issue. diff --git a/vendor/github.com/keybase/go-keybase-chat-bot/kbchat/kbchat.go b/vendor/github.com/keybase/go-keybase-chat-bot/kbchat/kbchat.go index 886cfff2..b4b5e5e8 100644 --- a/vendor/github.com/keybase/go-keybase-chat-bot/kbchat/kbchat.go +++ b/vendor/github.com/keybase/go-keybase-chat-bot/kbchat/kbchat.go @@ -303,7 +303,8 @@ func (a *API) startPipes() (err error) { cmd := a.runOpts.Command("chat", "notification-settings", fmt.Sprintf("-disable-typing=%v", !a.runOpts.EnableTyping)) if err = cmd.Run(); err != nil { - return fmt.Errorf("unable to set notifiation settings %v", err) + // This is a performance optimization but isn't a fatal error. + a.Debug("unable to set notifiation settings %v", err) } a.apiCmd = a.runOpts.Command("chat", "api") @@ -312,7 +313,7 @@ func (a *API) startPipes() (err error) { } output, err := a.apiCmd.StdoutPipe() if err != nil { - return fmt.Errorf("unabel to get api stdout: %v", err) + return fmt.Errorf("unable to get api stdout: %v", err) } if runtime.GOOS != "windows" { a.apiCmd.ExtraFiles = []*os.File{output.(*os.File)} diff --git a/vendor/github.com/lrstanley/girc/CODE_OF_CONDUCT.md b/vendor/github.com/lrstanley/girc/CODE_OF_CONDUCT.md deleted file mode 100644 index 0eb65874..00000000 --- a/vendor/github.com/lrstanley/girc/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,122 +0,0 @@ -<!-- THIS FILE IS GENERATED! DO NOT EDIT! Maintained by Terraform. --> -# Code of Conduct - -## Our Pledge :purple_heart: - -We as members, contributors, and leaders pledge to make participation in our -community a harassment-free experience for everyone, regardless of age, body -size, visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, -nationality, personal appearance, race, caste, color, religion, or sexual -identity and orientation. 
- -We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. - -## Our Standards - -Examples of behavior that contributes to a positive environment for our -community include: - -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -* Focusing on what is best not just for us as individuals, but for the overall - community - -Examples of unacceptable behavior include: - -* The use of sexualized language or imagery, and sexual attention or advances of - any kind -* Trolling, insulting or derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or email address, - without their explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Enforcement Responsibilities - -Community leaders are responsible for clarifying and enforcing our standards of -acceptable behavior and will take appropriate and fair corrective action in -response to any behavior that they deem inappropriate, threatening, offensive, -or harmful. - -Community leaders have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, and will communicate reasons for moderation -decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also applies when -an individual is officially representing the community in public spaces. -Examples of representing our community include using an official e-mail address, -posting via an official social media account, or acting as an appointed -representative at an online or offline event. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement at -disclosure@liamstanley.io. All complaints will be reviewed and investigated -promptly and fairly. - -All community leaders are obligated to respect the privacy and security of the -reporter of any incident. - -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining -the consequences for any action they deem in violation of this Code of Conduct: - -### 1. Correction - -**Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. - -**Consequence**: A private, written warning from community leaders, providing -clarity around the nature of the violation and an explanation of why the -behavior was inappropriate. A public apology may be requested. - -### 2. Warning - -**Community Impact**: A violation through a single incident or series of -actions. - -**Consequence**: A warning with consequences for continued behavior. No -interaction with the people involved, including unsolicited interaction with -those enforcing the Code of Conduct, for a specified period of time. This -includes avoiding interactions in community spaces as well as external channels -like social media. Violating these terms may lead to a temporary or permanent -ban. - -### 3. 
Temporary Ban - -**Community Impact**: A serious violation of community standards, including -sustained inappropriate behavior. - -**Consequence**: A temporary ban from any sort of interaction or public -communication with the community for a specified period of time. No public or -private interaction with the people involved, including unsolicited interaction -with those enforcing the Code of Conduct, is allowed during this period. -Violating these terms may lead to a permanent ban. - -### 4. Permanent Ban - -**Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. - -**Consequence**: A permanent ban from any sort of public interaction within the -community. - -## Attribution - -This Code of Conduct is adapted from the Contributor Covenant, -version 2.1, available [here](https://www.contributor-covenant.org/version/2/1/code_of_conduct.html). - -For answers to common questions about this code of conduct, see the [FAQ](https://www.contributor-covenant.org/faq). -Translations are available at [translations](https://www.contributor-covenant.org/translations). diff --git a/vendor/github.com/lrstanley/girc/CONTRIBUTING.md b/vendor/github.com/lrstanley/girc/CONTRIBUTING.md deleted file mode 100644 index 9c237e3c..00000000 --- a/vendor/github.com/lrstanley/girc/CONTRIBUTING.md +++ /dev/null @@ -1,106 +0,0 @@ -<!-- THIS FILE IS GENERATED! DO NOT EDIT! Maintained by Terraform. --> -# :handshake: Contributing - -This document outlines some of the guidelines that we try and adhere to while -working on this project. - -> :point_right: **Note**: before participating in the community, please read our -> [Code of Conduct][coc]. -> By interacting with this repository, organization, or community you agree to -> abide by our Code of Conduct. -> -> Additionally, if you contribute **any source code** to this repository, you -> agree to the terms of the [Developer Certificate of Origin][dco]. This helps -> ensure that contributions aren't in violation of 3rd party license terms. - -## :lady_beetle: Issue submission - -When [submitting an issue][issues] or bug report, -please follow these guidelines: - - * Provide as much information as possible (logs, metrics, screenshots, - runtime environment, etc). - * Ensure that you are running on the latest stable version (tagged), or - when using `master`, provide the specific commit being used. - * Provide the minimum needed viable source to replicate the problem. - -## :bulb: Feature requests - -When [submitting a feature request][issues], please -follow these guidelines: - - * Does this feature benefit others? or just your usecase? If the latter, - it will likely be declined, unless it has a more broad benefit to others. - * Please include the pros and cons of the feature. - * If possible, describe how the feature would work, and any diagrams/mock - examples of what the feature would look like. - -## :rocket: Pull requests - -To review what is currently being worked on, or looked into, feel free to head -over to the [open pull requests][pull-requests] or [issues list][issues]. - -## :raised_back_of_hand: Assistance with discussions - - * Take a look at the [open discussions][discussions], and if you feel like - you'd like to help out other members of the community, it would be much - appreciated! 
- -## :pushpin: Guidelines - -### :test_tube: Language agnostic - -Below are a few guidelines if you would like to contribute: - - * If the feature is large or the bugfix has potential breaking changes, - please open an issue first to ensure the changes go down the best path. - * If possible, break the changes into smaller PRs. Pull requests should be - focused on a specific feature/fix. - * Pull requests will only be accepted with sufficient documentation - describing the new functionality/fixes. - * Keep the code simple where possible. Code that is smaller/more compact - does not mean better. Don't do magic behind the scenes. - * Use the same formatting/styling/structure as existing code. - * Follow idioms and community-best-practices of the related language, - unless the previous above guidelines override what the community - recommends. - * Always test your changes, both the features/fixes being implemented, but - also in the standard way that a user would use the project (not just - your configuration that fixes your issue). - * Only use 3rd party libraries when necessary. If only a small portion of - the library is needed, simply rewrite it within the library to prevent - useless imports. - -### :hamster: Golang - - * See [golang/go/wiki/CodeReviewComments](https://github.com/golang/go/wiki/CodeReviewComments) - * This project uses [golangci-lint](https://golangci-lint.run/) for - Go-related files. This should be available for any editor that supports - `gopls`, however you can also run it locally with `golangci-lint run` - after installing it. - - - - -## :clipboard: References - - * [Open Source: How to Contribute](https://opensource.guide/how-to-contribute/) - * [About pull requests](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests) - * [GitHub Docs](https://docs.github.com/) - -## :speech_balloon: What to do next? - - * :old_key: Find a vulnerability? Check out our [Security and Disclosure][security] policy. - * :link: Repository [License][license]. - * [Support][support] - * [Code of Conduct][coc]. 
- -<!-- definitions --> -[coc]: https://github.com/lrstanley/girc/blob/master/CODE_OF_CONDUCT.md -[dco]: https://developercertificate.org/ -[discussions]: https://github.com/lrstanley/girc/discussions -[issues]: https://github.com/lrstanley/girc/issues/new/choose -[license]: https://github.com/lrstanley/girc/blob/master/LICENSE -[pull-requests]: https://github.com/lrstanley/girc/issues/new/choose -[security]: https://github.com/lrstanley/girc/security/policy -[support]: https://github.com/lrstanley/girc/blob/master/SUPPORT.md diff --git a/vendor/github.com/lrstanley/girc/README.md b/vendor/github.com/lrstanley/girc/README.md index 0a5fba99..9f62ca85 100644 --- a/vendor/github.com/lrstanley/girc/README.md +++ b/vendor/github.com/lrstanley/girc/README.md @@ -3,32 +3,42 @@ <!-- do not edit anything in this "template" block, its auto-generated --> <p align="center">girc -- :bomb: girc is a flexible IRC library for Go :ok_hand:</p> <p align="center"> + <a href="https://github.com/lrstanley/girc/tags"> + <img title="Latest Semver Tag" src="https://img.shields.io/github/v/tag/lrstanley/girc?style=flat-square"> + </a> + <a href="https://github.com/lrstanley/girc/commits/master"> + <img title="Last commit" src="https://img.shields.io/github/last-commit/lrstanley/girc?style=flat-square"> + </a> <a href="https://github.com/lrstanley/girc/actions?query=workflow%3Atest+event%3Apush"> - <img alt="GitHub Workflow Status (test @ master)" src="https://img.shields.io/github/workflow/status/lrstanley/girc/test/master?label=test&style=flat-square&event=push"> + <img title="GitHub Workflow Status (test @ master)" src="https://img.shields.io/github/workflow/status/lrstanley/girc/test/master?label=test&style=flat-square&event=push"> </a> - <img alt="Code Coverage" src="https://img.shields.io/codecov/c/github/lrstanley/girc/master?style=flat-square"> + <a href="https://codecov.io/gh/lrstanley/girc"> + <img title="Code Coverage" src="https://img.shields.io/codecov/c/github/lrstanley/girc/master?style=flat-square"> + </a> <a href="https://pkg.go.dev/github.com/lrstanley/girc"> - <img alt="Go Documentation" src="https://pkg.go.dev/badge/github.com/lrstanley/girc?style=flat-square"> + <img title="Go Documentation" src="https://pkg.go.dev/badge/github.com/lrstanley/girc?style=flat-square"> </a> <a href="https://goreportcard.com/report/github.com/lrstanley/girc"> - <img alt="Go Report Card" src="https://goreportcard.com/badge/github.com/lrstanley/girc?style=flat-square"> + <img title="Go Report Card" src="https://goreportcard.com/badge/github.com/lrstanley/girc?style=flat-square"> </a> - <img alt="Bug reports" src="https://img.shields.io/github/issues/lrstanley/girc/bug?label=issues&style=flat-square"> - <img alt="Feature requests" src="https://img.shields.io/github/issues/lrstanley/girc/enhancement?label=feature%20requests&style=flat-square"> - <a href="https://github.com/lrstanley/girc/pulls"> - <img alt="Open Pull Requests" src="https://img.shields.io/github/issues-pr/lrstanley/girc?style=flat-square"> +</p> +<p align="center"> + <a href="https://github.com/lrstanley/girc/issues?q=is:open+is:issue+label:bug"> + <img title="Bug reports" src="https://img.shields.io/github/issues/lrstanley/girc/bug?label=issues&style=flat-square"> </a> - <a href="https://github.com/lrstanley/girc/tags"> - <img alt="Latest Semver Tag" src="https://img.shields.io/github/v/tag/lrstanley/girc?style=flat-square"> + <a href="https://github.com/lrstanley/girc/issues?q=is:open+is:issue+label:enhancement"> + <img title="Feature requests" 
src="https://img.shields.io/github/issues/lrstanley/girc/enhancement?label=feature%20requests&style=flat-square"> + </a> + <a href="https://github.com/lrstanley/girc/pulls"> + <img title="Open Pull Requests" src="https://img.shields.io/github/issues-pr/lrstanley/girc?label=prs&style=flat-square"> </a> - <img alt="Last commit" src="https://img.shields.io/github/last-commit/lrstanley/girc?style=flat-square"> <a href="https://github.com/lrstanley/girc/discussions/new?category=q-a"> - <img alt="Ask a Question" src="https://img.shields.io/badge/discussions-ask_a_question!-green?style=flat-square"> + <img title="Ask a Question" src="https://img.shields.io/badge/support-ask_a_question!-blue?style=flat-square"> </a> - <a href="https://liam.sh/chat"><img src="https://img.shields.io/badge/discord-bytecord-blue.svg?style=flat-square" alt="Discord Chat"></a> + <a href="https://liam.sh/chat"><img src="https://img.shields.io/badge/discord-bytecord-blue.svg?style=flat-square" title="Discord Chat"></a> </p> <!-- template:end:header --> @@ -40,7 +50,7 @@ - [Installing](#installing) - [Examples](#examples) - [References](#references) - - [Support & Assistance](#raising_hand_man-support-assistance) + - [Support & Assistance](#raising_hand_man-support--assistance) - [Contributing](#handshake-contributing) - [License](#balance_scale-license) <!-- template:end:toc --> @@ -107,10 +117,10 @@ Working on a project and want to add it to the list? Submit a pull request! <!-- do not edit anything in this "template" block, its auto-generated --> ## :raising_hand_man: Support & Assistance - * :heart: Please review the [Code of Conduct](CODE_OF_CONDUCT.md) for + * :heart: Please review the [Code of Conduct](.github/CODE_OF_CONDUCT.md) for guidelines on ensuring everyone has the best experience interacting with the community. - * :raising_hand_man: Take a look at the [support](SUPPORT.md) document on + * :raising_hand_man: Take a look at the [support](.github/SUPPORT.md) document on guidelines for tips on how to ask the right questions. * :lady_beetle: For all features/bugs/issues/questions/etc, [head over here](https://github.com/lrstanley/girc/issues/new/choose). <!-- template:end:support --> @@ -119,10 +129,10 @@ Working on a project and want to add it to the list? Submit a pull request! <!-- do not edit anything in this "template" block, its auto-generated --> ## :handshake: Contributing - * :heart: Please review the [Code of Conduct](CODE_OF_CONDUCT.md) for guidelines + * :heart: Please review the [Code of Conduct](.github/CODE_OF_CONDUCT.md) for guidelines on ensuring everyone has the best experience interacting with the community. - * :clipboard: Please review the [contributing](CONTRIBUTING.md) doc for submitting + * :clipboard: Please review the [contributing](.github/CONTRIBUTING.md) doc for submitting issues/a guide on submitting pull requests and helping out. * :old_key: For anything security related, please review this repositories [security policy](https://github.com/lrstanley/girc/security/policy). <!-- template:end:contributing --> diff --git a/vendor/github.com/lrstanley/girc/SECURITY.md b/vendor/github.com/lrstanley/girc/SECURITY.md deleted file mode 100644 index 7c951d0c..00000000 --- a/vendor/github.com/lrstanley/girc/SECURITY.md +++ /dev/null @@ -1,54 +0,0 @@ -<!-- THIS FILE IS GENERATED! DO NOT EDIT! Maintained by Terraform. 
--> -# :old_key: Security Policy - -## :heavy_check_mark: Supported Versions - -The following restrictions apply for versions that are still supported in terms of security and bug fixes: - - * :grey_question: Must be using the latest major/minor version. - * :grey_question: Must be using a supported platform for the repository (e.g. OS, browser, etc), and that platform must - be within its supported versions (for example: don't use a legacy or unsupported version of Ubuntu or - Google Chrome). - * :grey_question: Repository must not be archived (unless the vulnerability is critical, and the repository moderately - popular). - * :heavy_check_mark: - -If one of the above doesn't apply to you, feel free to submit an issue and we can discuss the -issue/vulnerability further. - - -## :lady_beetle: Reporting a Vulnerability - -Best method of contact: [GPG :key:](https://github.com/lrstanley.gpg) - - * :speech_balloon: [Discord][chat]: message `/home/liam#0000`. - * :email: Email: `security@liamstanley.io` - -Backup contacts (if I am unresponsive after **48h**): [GPG :key:](https://github.com/FM1337.gpg) - * :speech_balloon: [Discord][chat]: message `Allen#7440`. - * :email: Email: `security@allenlydiard.ca` - -If you feel that this disclosure doesn't include a critical vulnerability and there is no sensitive -information in the disclosure, you don't have to use the GPG key. For all other situations, please -use it. - -### :stopwatch: Vulnerability disclosure expectations - - * :no_bell: We expect you to not share this information with others, unless: - * The maximum timeline for initial response has been exceeded (shown below). - * The maximum resolution time has been exceeded (shown below). - * :mag_right: We expect you to responsibly investigate this vulnerability -- please do not utilize the - vulnerability beyond the initial findings. - * :stopwatch: Initial response within 48h, however, if the primary contact shown above is unavailable, please - use the backup contacts provided. The maximum timeline for an initial response should be within - 7 days. - * :stopwatch: Depending on the severity of the disclosure, resolution time may be anywhere from 24h to 2 - weeks after initial response, though in most cases it will likely be closer to the former. - * If the vulnerability is very low/low in terms of risk, the above timelines **will not apply**. - * :toolbox: Before the release of resolved versions, a [GitHub Security Advisory][advisory-docs]. - will be released on the respective repository. [Browser all advisories here][advisory]. - -<!-- definitions --> -[chat]: https://liam.sh/chat -[advisory]: https://github.com/advisories?query=type%3Areviewed+ecosystem%3Ago -[advisory-docs]: https://docs.github.com/en/code-security/repository-security-advisories/creating-a-repository-security-advisory diff --git a/vendor/github.com/lrstanley/girc/SUPPORT.md b/vendor/github.com/lrstanley/girc/SUPPORT.md deleted file mode 100644 index aca24782..00000000 --- a/vendor/github.com/lrstanley/girc/SUPPORT.md +++ /dev/null @@ -1,56 +0,0 @@ -# :raising_hand_man: Support - -This document explains where and how to get help with most of my projects. -Please ensure you read through it thoroughly. - -> :point_right: **Note**: before participating in the community, please read our -> [Code of Conduct][coc]. -> By interacting with this repository, organization, or community you agree to -> abide by its terms. 
- -## :grey_question: Asking quality questions - -Questions can go to [Github Discussions][discussions] or feel free to join -the Discord [here][chat]. - -Help me help you! Spend time framing questions and add links and resources. -Spending the extra time up front can help save everyone time in the long run. -Here are some tips: - -* Don't fall for the [XY problem][xy]. -* Search to find out if a similar question has been asked or if a similar - issue/bug has been reported. -* Try to define what you need help with: - * Is there something in particular you want to do? - * What problem are you encountering and what steps have you taken to try - and fix it? - * Is there a concept you don't understand? -* Provide sample code, such as a [CodeSandbox][cs] or a simple snippet, if - possible. -* Screenshots can help, but if there's important text such as code or error - messages in them, please also provide those. -* The more time you put into asking your question, the better I and others - can help you. - -## :old_key: Security - -For any security or vulnerability related disclosure, please follow the -guidelines outlined in our [security policy][security]. - -## :handshake: Contributions - -See [`CONTRIBUTING.md`][contributing] on how to contribute. - -<!-- definitions --> -[coc]: https://github.com/lrstanley/girc/blob/master/CODE_OF_CONDUCT.md -[contributing]: https://github.com/lrstanley/girc/blob/master/CONTRIBUTING.md -[discussions]: https://github.com/lrstanley/girc/discussions/categories/q-a -[issues]: https://github.com/lrstanley/girc/issues/new/choose -[license]: https://github.com/lrstanley/girc/blob/master/LICENSE -[pull-requests]: https://github.com/lrstanley/girc/issues/new/choose -[security]: https://github.com/lrstanley/girc/security/policy -[support]: https://github.com/lrstanley/girc/blob/master/SUPPORT.md - -[xy]: https://meta.stackexchange.com/questions/66377/what-is-the-xy-problem/66378#66378 -[chat]: https://liam.sh/chat -[cs]: https://codesandbox.io diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go index 367166d5..e1e9dd7b 100644 --- a/vendor/github.com/magiconair/properties/lex.go +++ b/vendor/github.com/magiconair/properties/lex.go @@ -128,18 +128,6 @@ func (l *lexer) acceptRun(valid string) { l.backup() } -// acceptRunUntil consumes a run of runes up to a terminator. -func (l *lexer) acceptRunUntil(term rune) { - for term != l.next() { - } - l.backup() -} - -// hasText returns true if the current parsed text is not empty. -func (l *lexer) isNotEmpty() bool { - return l.pos > l.start -} - // lineNumber reports which line we're on, based on the position of // the previous item returned by nextItem. Doing it this way // means we don't have to worry about peek double counting. 
diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go index cdc4a803..430e4fcd 100644 --- a/vendor/github.com/magiconair/properties/parser.go +++ b/vendor/github.com/magiconair/properties/parser.go @@ -59,14 +59,6 @@ func (p *parser) errorf(format string, args ...interface{}) { panic(fmt.Errorf(format, args...)) } -func (p *parser) expect(expected itemType) (token item) { - token = p.lex.nextItem() - if token.typ != expected { - p.unexpected(token) - } - return token -} - func (p *parser) expectOneOf(expected ...itemType) (token item) { token = p.lex.nextItem() for _, v := range expected { @@ -91,5 +83,4 @@ func (p *parser) recover(errp *error) { } *errp = e.(error) } - return } diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go index 1529e722..62ae2d67 100644 --- a/vendor/github.com/magiconair/properties/properties.go +++ b/vendor/github.com/magiconair/properties/properties.go @@ -786,7 +786,6 @@ func expand(s string, keys []string, prefix, postfix string, values map[string]s } s = s[:start] + new_val + s[end+1:] } - return s, nil } // encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters. diff --git a/vendor/github.com/pelletier/go-toml/v2/.dockerignore b/vendor/github.com/pelletier/go-toml/v2/.dockerignore new file mode 100644 index 00000000..7b588347 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/.dockerignore @@ -0,0 +1,2 @@ +cmd/tomll/tomll +cmd/tomljson/tomljson diff --git a/vendor/github.com/pelletier/go-toml/v2/.gitattributes b/vendor/github.com/pelletier/go-toml/v2/.gitattributes new file mode 100644 index 00000000..34a0a21a --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/.gitattributes @@ -0,0 +1,4 @@ +* text=auto + +benchmark/benchmark.toml text eol=lf +testdata/** text eol=lf diff --git a/vendor/github.com/pelletier/go-toml/v2/.gitignore b/vendor/github.com/pelletier/go-toml/v2/.gitignore new file mode 100644 index 00000000..a69e2b0e --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/.gitignore @@ -0,0 +1,6 @@ +test_program/test_program_bin +fuzz/ +cmd/tomll/tomll +cmd/tomljson/tomljson +cmd/tomltestgen/tomltestgen +dist
\ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/v2/.golangci.toml b/vendor/github.com/pelletier/go-toml/v2/.golangci.toml new file mode 100644 index 00000000..067db551 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/.golangci.toml @@ -0,0 +1,84 @@ +[service] +golangci-lint-version = "1.39.0" + +[linters-settings.wsl] +allow-assign-and-anything = true + +[linters-settings.exhaustive] +default-signifies-exhaustive = true + +[linters] +disable-all = true +enable = [ + "asciicheck", + "bodyclose", + "cyclop", + "deadcode", + "depguard", + "dogsled", + "dupl", + "durationcheck", + "errcheck", + "errorlint", + "exhaustive", + # "exhaustivestruct", + "exportloopref", + "forbidigo", + # "forcetypeassert", + "funlen", + "gci", + # "gochecknoglobals", + "gochecknoinits", + "gocognit", + "goconst", + "gocritic", + "gocyclo", + "godot", + "godox", + # "goerr113", + "gofmt", + "gofumpt", + "goheader", + "goimports", + "golint", + "gomnd", + # "gomoddirectives", + "gomodguard", + "goprintffuncname", + "gosec", + "gosimple", + "govet", + # "ifshort", + "importas", + "ineffassign", + "lll", + "makezero", + "misspell", + "nakedret", + "nestif", + "nilerr", + # "nlreturn", + "noctx", + "nolintlint", + #"paralleltest", + "prealloc", + "predeclared", + "revive", + "rowserrcheck", + "sqlclosecheck", + "staticcheck", + "structcheck", + "stylecheck", + # "testpackage", + "thelper", + "tparallel", + "typecheck", + "unconvert", + "unparam", + "unused", + "varcheck", + "wastedassign", + "whitespace", + # "wrapcheck", + # "wsl" +] diff --git a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml new file mode 100644 index 00000000..793fb184 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml @@ -0,0 +1,111 @@ +before: + hooks: + - go mod tidy + - go fmt ./... + - go test ./... 
+builds: + - id: tomll + main: ./cmd/tomll + binary: tomll + env: + - CGO_ENABLED=0 + flags: + - -trimpath + ldflags: + - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} + mod_timestamp: '{{ .CommitTimestamp }}' + targets: + - linux_amd64 + - windows_amd64 + - darwin_amd64 + - darwin_arm64 + - id: tomljson + main: ./cmd/tomljson + binary: tomljson + env: + - CGO_ENABLED=0 + flags: + - -trimpath + ldflags: + - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} + mod_timestamp: '{{ .CommitTimestamp }}' + targets: + - linux_amd64 + - windows_amd64 + - darwin_amd64 + - darwin_arm64 + - id: jsontoml + main: ./cmd/jsontoml + binary: jsontoml + env: + - CGO_ENABLED=0 + flags: + - -trimpath + ldflags: + - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} + mod_timestamp: '{{ .CommitTimestamp }}' + targets: + - linux_amd64 + - windows_amd64 + - darwin_amd64 + - darwin_arm64 +universal_binaries: + - id: tomll + replace: true + name_template: tomll + - id: tomljson + replace: true + name_template: tomljson + - id: jsontoml + replace: true + name_template: jsontoml +archives: +- id: jsontoml + format: tar.xz + builds: + - jsontoml + files: + - none* + name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}" +- id: tomljson + format: tar.xz + builds: + - tomljson + files: + - none* + name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}" +- id: tomll + format: tar.xz + builds: + - tomll + files: + - none* + name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}" +dockers: + - id: tools + goos: linux + goarch: amd64 + ids: + - jsontoml + - tomljson + - tomll + image_templates: + - "ghcr.io/pelletier/go-toml:latest" + - "ghcr.io/pelletier/go-toml:{{ .Tag }}" + - "ghcr.io/pelletier/go-toml:v{{ .Major }}" + skip_push: false +checksum: + name_template: 'sha256sums.txt' +snapshot: + name_template: "{{ incpatch .Version }}-next" +release: + github: + owner: pelletier + name: go-toml + draft: true + prerelease: auto + mode: replace +changelog: + use: github-native +announce: + skip: true diff --git a/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md new file mode 100644 index 00000000..04dd12bc --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md @@ -0,0 +1,196 @@ +# Contributing + +Thank you for your interest in go-toml! We appreciate you considering +contributing to go-toml! + +The main goal is the project is to provide an easy-to-use and efficient TOML +implementation for Go that gets the job done and gets out of your way – dealing +with TOML is probably not the central piece of your project. + +As the single maintainer of go-toml, time is scarce. All help, big or small, is +more than welcomed! + +## Ask questions + +Any question you may have, somebody else might have it too. Always feel free to +ask them on the [discussion board][discussions]. We will try to answer them as +clearly and quickly as possible, time permitting. + +Asking questions also helps us identify areas where the documentation needs +improvement, or new features that weren't envisioned before. Sometimes, a +seemingly innocent question leads to the fix of a bug. Don't hesitate and ask +away! + +[discussions]: https://github.com/pelletier/go-toml/discussions + +## Improve the documentation + +The best way to share your knowledge and experience with go-toml is to improve +the documentation. 
Fix a typo, clarify an interface, add an example, anything +goes! + +The documentation is present in the [README][readme] and thorough the source +code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a change +to the documentation, create a pull request with your proposed changes. For +simple changes like that, the easiest way to go is probably the "Fork this +project and edit the file" button on Github, displayed at the top right of the +file. Unless it's a trivial change (for example a typo), provide a little bit of +context in your pull request description or commit message. + +## Report a bug + +Found a bug! Sorry to hear that :(. Help us and other track them down and fix by +reporting it. [File a new bug report][bug-report] on the [issues +tracker][issues-tracker]. The template should provide enough guidance on what to +include. When in doubt: add more details! By reducing ambiguity and providing +more information, it decreases back and forth and saves everyone time. + +## Code changes + +Want to contribute a patch? Very happy to hear that! + +First, some high-level rules: + +- A short proposal with some POC code is better than a lengthy piece of text + with no code. Code speaks louder than words. That being said, bigger changes + should probably start with a [discussion][discussions]. +- No backward-incompatible patch will be accepted unless discussed. Sometimes + it's hard, but we try not to break people's programs unless we absolutely have + to. +- If you are writing a new feature or extending an existing one, make sure to + write some documentation. +- Bug fixes need to be accompanied with regression tests. +- New code needs to be tested. +- Your commit messages need to explain why the change is needed, even if already + included in the PR description. + +It does sound like a lot, but those best practices are here to save time overall +and continuously improve the quality of the project, which is something everyone +benefits from. + +### Get started + +The fairly standard code contribution process looks like that: + +1. [Fork the project][fork]. +2. Make your changes, commit on any branch you like. +3. [Open up a pull request][pull-request] +4. Review, potential ask for changes. +5. Merge. + +Feel free to ask for help! You can create draft pull requests to gather +some early feedback! + +### Run the tests + +You can run tests for go-toml using Go's test tool: `go test -race ./...`. + +During the pull request process, all tests will be ran on Linux, Windows, and +MacOS on the last two versions of Go. + +However, given GitHub's new policy to _not_ run Actions on pull requests until a +maintainer clicks on button, it is highly recommended that you run them locally +as you make changes. + +### Check coverage + +We use `go tool cover` to compute test coverage. Most code editors have a way to +run and display code coverage, but at the end of the day, we do this: + +``` +go test -covermode=atomic -coverprofile=coverage.out +go tool cover -func=coverage.out +``` + +and verify that the overall percentage of tested code does not go down. This is +a requirement. As a rule of thumb, all lines of code touched by your changes +should be covered. On Unix you can use `./ci.sh coverage -d v2` to check if your +code lowers the coverage. + +### Verify performance + +Go-toml aims to stay efficient. We rely on a set of scenarios executed with Go's +builtin benchmark systems. 
Because of their noisy nature, containers provided by +Github Actions cannot be reliably used for benchmarking. As a result, you are +responsible for checking that your changes do not incur a performance penalty. +You can run their following to execute benchmarks: + +``` +go test ./... -bench=. -count=10 +``` + +Benchmark results should be compared against each other with +[benchstat][benchstat]. Typical flow looks like this: + +1. On the `v2` branch, run `go test ./... -bench=. -count 10` and save output to + a file (for example `old.txt`). +2. Make some code changes. +3. Run `go test ....` again, and save the output to an other file (for example + `new.txt`). +4. Run `benchstat old.txt new.txt` to check that time/op does not go up in any + test. + +On Unix you can use `./ci.sh benchmark -d v2` to verify how your code impacts +performance. + +It is highly encouraged to add the benchstat results to your pull request +description. Pull requests that lower performance will receive more scrutiny. + +[benchstat]: https://pkg.go.dev/golang.org/x/perf/cmd/benchstat + +### Style + +Try to look around and follow the same format and structure as the rest of the +code. We enforce using `go fmt` on the whole code base. + +--- + +## Maintainers-only + +### Merge pull request + +Checklist: + +- Passing CI. +- Does not introduce backward-incompatible changes (unless discussed). +- Has relevant doc changes. +- Benchstat does not show performance regression. +- Pull request is [labeled appropriately][pr-labels]. +- Title will be understandable in the changelog. + +1. Merge using "squash and merge". +2. Make sure to edit the commit message to keep all the useful information + nice and clean. +3. Make sure the commit title is clear and contains the PR number (#123). + +### New release + +1. Decide on the next version number. Use semver. +2. Generate release notes using [`gh`][gh]. Example: +``` +$ gh api -X POST \ + -F tag_name='v2.0.0-beta.5' \ + -F target_commitish='v2' \ + -F previous_tag_name='v2.0.0-beta.4' \ + --jq '.body' \ + repos/pelletier/go-toml/releases/generate-notes +``` +3. Look for "Other changes". That would indicate a pull request not labeled + properly. Tweak labels and pull request titles until changelog looks good for + users. +4. [Draft new release][new-release]. +5. Fill tag and target with the same value used to generate the changelog. +6. Set title to the new tag value. +7. Paste the generated changelog. +8. Check "create discussion", in the "Releases" category. +9. Check pre-release if new version is an alpha or beta. 
+ +[issues-tracker]: https://github.com/pelletier/go-toml/issues +[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md +[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml +[readme]: ./README.md +[fork]: https://help.github.com/articles/fork-a-repo +[pull-request]: https://help.github.com/en/articles/creating-a-pull-request +[new-release]: https://github.com/pelletier/go-toml/releases/new +[gh]: https://github.com/cli/cli +[pr-labels]: https://github.com/pelletier/go-toml/blob/v2/.github/release.yml diff --git a/vendor/github.com/pelletier/go-toml/v2/Dockerfile b/vendor/github.com/pelletier/go-toml/v2/Dockerfile new file mode 100644 index 00000000..b9e93323 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/Dockerfile @@ -0,0 +1,5 @@ +FROM scratch +ENV PATH "$PATH:/bin" +COPY tomll /bin/tomll +COPY tomljson /bin/tomljson +COPY jsontoml /bin/jsontoml diff --git a/vendor/github.com/pelletier/go-toml/v2/LICENSE b/vendor/github.com/pelletier/go-toml/v2/LICENSE new file mode 100644 index 00000000..3a38ac28 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 - 2021 Thomas Pelletier, Eric Anderton + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/pelletier/go-toml/v2/README.md b/vendor/github.com/pelletier/go-toml/v2/README.md new file mode 100644 index 00000000..ca5b5ced --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/README.md @@ -0,0 +1,556 @@ +# go-toml v2 + +Go library for the [TOML](https://toml.io/en/) format. + +This library supports [TOML v1.0.0](https://toml.io/en/v1.0.0). + +## Development status + +This is the upcoming major version of go-toml. It is currently in active +development. As of release v2.0.0-beta.1, the library has reached feature parity +with v1, and fixes a lot known bugs and performance issues along the way. + +If you do not need the advanced document editing features of v1, you are +encouraged to try out this version. + +[👉 Roadmap for v2](https://github.com/pelletier/go-toml/discussions/506) + +[🐞 Bug Reports](https://github.com/pelletier/go-toml/issues) + +[💬 Anything else](https://github.com/pelletier/go-toml/discussions) + +## Documentation + +Full API, examples, and implementation notes are available in the Go +documentation. 
+ +[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml/v2.svg)](https://pkg.go.dev/github.com/pelletier/go-toml/v2) + +## Import + +```go +import "github.com/pelletier/go-toml/v2" +``` + +See [Modules](#Modules). + +## Features + +### Stdlib behavior + +As much as possible, this library is designed to behave similarly to the +standard library's `encoding/json`. + +### Performance + +While go-toml favors usability, it is written with performance in mind. Most +operations should not be shockingly slow. See [benchmarks](#benchmarks). + +### Strict mode + +`Decoder` can be set to "strict mode", which makes it error when some parts of +the TOML document are not present in the target structure. This is a great way +to check for typos. [See example in the documentation][strict]. + +[strict]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Decoder.DisallowUnknownFields + +### Contextualized errors + +When most decoding errors occur, go-toml returns a [`DecodeError`][decode-err], +which contains a human-readable, contextualized version of the error. For +example: + +``` +2| key1 = "value1" +3| key2 = "missing2" + | ~~~~ missing field +4| key3 = "missing3" +5| key4 = "value4" +``` + +[decode-err]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#DecodeError + +### Local date and time support + +TOML supports native [local date/times][ldt]. It allows representing a given +date, time, or date-time without relation to a timezone or offset. To support +this use-case, go-toml provides [`LocalDate`][tld], [`LocalTime`][tlt], and +[`LocalDateTime`][tldt]. Those types can be transformed to and from `time.Time`, +making them convenient yet unambiguous structures for their respective TOML +representation. + +[ldt]: https://toml.io/en/v1.0.0#local-date-time +[tld]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDate +[tlt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalTime +[tldt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDateTime + +## Getting started + +Given the following struct, let's see how to read it and write it as TOML: + +```go +type MyConfig struct { + Version int + Name string + Tags []string +} +``` + +### Unmarshaling + +[`Unmarshal`][unmarshal] reads a TOML document and fills a Go structure with its +content.
For example: + +```go +doc := ` +version = 2 +name = "go-toml" +tags = ["go", "toml"] +` + +var cfg MyConfig +err := toml.Unmarshal([]byte(doc), &cfg) +if err != nil { + panic(err) +} +fmt.Println("version:", cfg.Version) +fmt.Println("name:", cfg.Name) +fmt.Println("tags:", cfg.Tags) + +// Output: +// version: 2 +// name: go-toml +// tags: [go toml] +``` + +[unmarshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Unmarshal + +### Marshaling + +[`Marshal`][marshal] is the opposite of Unmarshal: it represents a Go structure +as a TOML document: + +```go +cfg := MyConfig{ + Version: 2, + Name: "go-toml", + Tags: []string{"go", "toml"}, +} + +b, err := toml.Marshal(cfg) +if err != nil { + panic(err) +} +fmt.Println(string(b)) + +// Output: +// Version = 2 +// Name = 'go-toml' +// Tags = ['go', 'toml'] +``` + +[marshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Marshal + +## Benchmarks + +Execution time speedup compared to other Go TOML libraries: + +<table> + <thead> + <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr> + </thead> + <tbody> + <tr><td>Marshal/HugoFrontMatter-2</td><td>1.9x</td><td>1.9x</td></tr> + <tr><td>Marshal/ReferenceFile/map-2</td><td>1.7x</td><td>1.8x</td></tr> + <tr><td>Marshal/ReferenceFile/struct-2</td><td>2.2x</td><td>2.5x</td></tr> + <tr><td>Unmarshal/HugoFrontMatter-2</td><td>2.9x</td><td>2.9x</td></tr> + <tr><td>Unmarshal/ReferenceFile/map-2</td><td>2.6x</td><td>2.9x</td></tr> + <tr><td>Unmarshal/ReferenceFile/struct-2</td><td>4.4x</td><td>5.3x</td></tr> + </tbody> +</table> +<details><summary>See more</summary> +<p>The table above has the results of the most common use-cases. The table below +contains the results of all benchmarks, including unrealistic ones. It is +provided for completeness.</p> + +<table> + <thead> + <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr> + </thead> + <tbody> + <tr><td>Marshal/SimpleDocument/map-2</td><td>1.8x</td><td>2.9x</td></tr> + <tr><td>Marshal/SimpleDocument/struct-2</td><td>2.7x</td><td>4.2x</td></tr> + <tr><td>Unmarshal/SimpleDocument/map-2</td><td>4.5x</td><td>3.1x</td></tr> + <tr><td>Unmarshal/SimpleDocument/struct-2</td><td>6.2x</td><td>3.9x</td></tr> + <tr><td>UnmarshalDataset/example-2</td><td>3.1x</td><td>3.5x</td></tr> + <tr><td>UnmarshalDataset/code-2</td><td>2.3x</td><td>3.1x</td></tr> + <tr><td>UnmarshalDataset/twitter-2</td><td>2.5x</td><td>2.6x</td></tr> + <tr><td>UnmarshalDataset/citm_catalog-2</td><td>2.1x</td><td>2.2x</td></tr> + <tr><td>UnmarshalDataset/canada-2</td><td>1.6x</td><td>1.3x</td></tr> + <tr><td>UnmarshalDataset/config-2</td><td>4.3x</td><td>3.2x</td></tr> + <tr><td>[Geo mean]</td><td>2.7x</td><td>2.8x</td></tr> + </tbody> +</table> +<p>This table can be generated with <code>./ci.sh benchmark -a -html</code>.</p> +</details> + +## Modules + +go-toml uses Go's standard modules system. + +Installation instructions: + +- Go ≥ 1.16: Nothing to do. Use the import in your code. The `go` command deals + with it automatically. +- Go ≥ 1.13: `GO111MODULE=on go get github.com/pelletier/go-toml/v2`. + +In case of trouble: [Go Modules FAQ][mod-faq]. + +[mod-faq]: https://github.com/golang/go/wiki/Modules#why-does-installing-a-tool-via-go-get-fail-with-error-cannot-find-main-module + +## Tools + +Go-toml provides three handy command line tools: + + * `tomljson`: Reads a TOML file and outputs its JSON representation. 
+ + ``` + $ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest + $ tomljson --help + ``` + + * `jsontoml`: Reads a JSON file and outputs a TOML representation. + + ``` + $ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest + $ jsontoml --help + ``` + + * `tomll`: Lints and reformats a TOML file. + + ``` + $ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest + $ tomll --help + ``` + +### Docker image + +Those tools are also available as a [Docker image][docker]. For example, to use +`tomljson`: + +``` +docker run -i ghcr.io/pelletier/go-toml:v2 tomljson < example.toml +``` + +Multiple versions are available on [ghcr.io][docker]. + +[docker]: https://github.com/pelletier/go-toml/pkgs/container/go-toml + +## Migrating from v1 + +This section describes the differences between v1 and v2, with some pointers on +how to get the original behavior when possible. + +### Decoding / Unmarshal + +#### Automatic field name guessing + +When unmarshaling to a struct, if a key in the TOML document does not exactly +match the name of a struct field or any of the `toml`-tagged fields, v1 tries +multiple variations of the key ([code][v1-keys]). + +V2 instead does case-insensitive matching, like `encoding/json`. + +This could impact you if you are relying on casing to differentiate two fields, +and one of them is not using the `toml` struct tag. The recommended solution +is to be specific about tag names for those fields using the `toml` struct tag; a short sketch of this is shown below. + +[v1-keys]: https://github.com/pelletier/go-toml/blob/a2e52561804c6cd9392ebf0048ca64fe4af67a43/marshal.go#L775-L781 + +#### Ignore preexisting value in interface + +When decoding into a non-nil `interface{}`, go-toml v1 uses the type of the +element in the interface to decode the object. For example: + +```go +type inner struct { + B interface{} +} +type doc struct { + A interface{} +} + +d := doc{ + A: inner{ + B: "Before", + }, +} + +data := ` +[A] +B = "After" +` + +toml.Unmarshal([]byte(data), &d) +fmt.Printf("toml v1: %#v\n", d) + +// toml v1: main.doc{A:main.inner{B:"After"}} +``` + +In this case, field `A` is of type `interface{}`, containing an `inner` struct. +V1 sees that type and uses it when decoding the object. + +When decoding an object into an `interface{}`, V2 instead disregards whatever +value the `interface{}` may contain and replaces it with a +`map[string]interface{}`. With the same data structure as above, here is what +the result looks like: + +```go +toml.Unmarshal([]byte(data), &d) +fmt.Printf("toml v2: %#v\n", d) + +// toml v2: main.doc{A:map[string]interface {}{"B":"After"}} ``` + +This is to match `encoding/json`'s behavior. There is no way to make the v2 +decoder behave like v1. + +#### Values out of array bounds ignored + +When decoding into an array, v1 returns an error when the number of elements +contained in the doc is greater than the capacity of the array. For example: + +```go +type doc struct { + A [2]string +} +d := doc{} +err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d) +fmt.Println(err) + +// (1, 1): unmarshal: TOML array length (3) exceeds destination array length (2) +``` + +In the same situation, v2 ignores the last value: + +```go +err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d) +fmt.Println("err:", err, "d:", d) +// err: <nil> d: {[one two]} +``` + +This is to match `encoding/json`'s behavior. There is no way to make the v2 +decoder behave like v1.
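As a minimal sketch of the tag-based solution mentioned under "Automatic field name guessing" above: the struct, keys, and values here are invented for illustration, and the explicit `toml` tags spell out the key-to-field mapping so it does not rely on field-name casing at all.

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

// Hypothetical configuration struct: each field carries an explicit
// `toml` tag, so matching does not depend on v1's key guessing or on
// v2's case-insensitive fallback.
type config struct {
	APIKey  string `toml:"api_key"`
	BaseURL string `toml:"base_url"`
}

func main() {
	doc := []byte(`
api_key = "secret"
base_url = "https://example.com"
`)

	var c config
	if err := toml.Unmarshal(doc, &c); err != nil {
		panic(err)
	}

	fmt.Println("api key:", c.APIKey)
	fmt.Println("base url:", c.BaseURL)

	// Output:
	// api key: secret
	// base url: https://example.com
}
```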
+ +#### Support for `toml.Unmarshaler` has been dropped + +This interface was not widely used, poorly defined, and added a lot of complexity. +A similar effect can be achieved by implementing the `encoding.TextUnmarshaler` +interface and using strings. + +#### Support for `default` struct tag has been dropped + +This feature adds complexity and a poorly defined API for an effect that can be +accomplished outside of the library. + +It does not seem like other format parsers in Go support that feature (the +project referenced in the original ticket #202 has not been updated since 2017). +Given that go-toml v2 should not touch values not in the document, the same +effect can be achieved by pre-filling the struct with defaults (libraries like +[go-defaults][go-defaults] can help). Also, string representation is not well +defined for all types: it creates issues like #278. + +The recommended replacement is pre-filling the struct before unmarshaling. + +[go-defaults]: https://github.com/mcuadros/go-defaults + +#### `toml.Tree` replacement + +This structure was the initial attempt at providing a document model for +go-toml. It allows manipulating the structure of any document, encoding and +decoding it from its TOML representation. While a more robust feature was +initially planned in go-toml v2, this has been ultimately [removed from the +scope][nodoc] of this library, with no plan to add it back at the moment. The +closest equivalent at the moment would be to unmarshal into an `interface{}` and +use type assertions and/or reflection to manipulate the arbitrary +structure. However, this would fall short of providing all of the TOML features, +such as adding comments and being specific about whitespace. + + +#### `toml.Position` is not retrievable anymore + +The API for retrieving the position (line, column) of a specific TOML element does +not exist anymore. This was done to minimize the number of concepts introduced +by the library (query path), and avoid the performance hit related to storing +positions in the absence of a document model, for a feature that seemed to have +little use. Errors, however, have gained more detailed position +information. Position retrieval seems better suited to a document model, which +has been [removed from the scope][nodoc] of go-toml v2 at the moment. + +### Encoding / Marshal + +#### Default struct fields order + +V1 emits struct fields in alphabetical order by default. V2 struct fields are +emitted in the order they are defined. For example: + +```go +type S struct { + B string + A string +} + +data := S{ + B: "B", + A: "A", +} + +b, _ := tomlv1.Marshal(data) +fmt.Println("v1:\n" + string(b)) + +b, _ = tomlv2.Marshal(data) +fmt.Println("v2:\n" + string(b)) + +// Output: +// v1: +// A = "A" +// B = "B" + +// v2: +// B = 'B' +// A = 'A' +``` + +There is no way to make the v2 encoder behave like v1. A workaround could be to +manually sort the fields alphabetically in the struct definition, or generate +struct types using `reflect.StructOf`. + +#### No indentation by default + +V1 automatically indents the content of tables by default. V2 does not. However, the +same behavior can be obtained using [`Encoder.SetIndentTables`][sit].
For example: + +```go +data := map[string]interface{}{ + "table": map[string]string{ + "key": "value", + }, +} + +b, _ := tomlv1.Marshal(data) +fmt.Println("v1:\n" + string(b)) + +b, _ = tomlv2.Marshal(data) +fmt.Println("v2:\n" + string(b)) + +buf := bytes.Buffer{} +enc := tomlv2.NewEncoder(&buf) +enc.SetIndentTables(true) +enc.Encode(data) +fmt.Println("v2 Encoder:\n" + string(buf.Bytes())) + +// Output: +// v1: +// +// [table] +// key = "value" +// +// v2: +// [table] +// key = 'value' +// +// +// v2 Encoder: +// [table] +// key = 'value' +``` + +[sit]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Encoder.SetIndentTables + +#### Keys and strings are single quoted + +V1 always uses double quotes (`"`) around strings and keys that cannot be +represented bare (unquoted). V2 uses single quotes instead by default (`'`), +unless a character cannot be represented, in which case it falls back to double quotes. As a +result of this change, `Encoder.QuoteMapKeys` has been removed, as it is not +useful anymore. + +There is no way to make the v2 encoder behave like v1. + +#### `TextMarshaler` emits as a string, not TOML + +Types that implement [`encoding.TextMarshaler`][tm] can emit arbitrary TOML in +v1. The encoder would append the result to the output directly. In v2 the result +is wrapped in a string. As a result, this interface cannot be implemented by the +root object. + +There is no way to make the v2 encoder behave like v1. + +[tm]: https://golang.org/pkg/encoding/#TextMarshaler + +#### `Encoder.CompactComments` has been removed + +Emitting compact comments is now the default behavior of go-toml. This option +is not necessary anymore. + +#### Struct tags have been merged + +V1 used to provide multiple struct tags: `comment`, `commented`, `multiline`, +`toml`, and `omitempty`. To behave more like the standard library, v2 has merged +`toml`, `multiline`, and `omitempty`. For example: + +```go +type doc struct { + // v1 + F string `toml:"field" multiline:"true" omitempty:"true"` + // v2 + F string `toml:"field,multiline,omitempty"` +} +``` + +As a result, the `Encoder.SetTag*` methods have been removed, as there is just +one tag now. + + +#### `commented` tag has been removed + +There is no replacement for the `commented` tag. This feature would be better +suited to a proper document model for go-toml v2, which has been [cut from +scope][nodoc] at the moment. + +#### `Encoder.ArraysWithOneElementPerLine` has been renamed + +The new name is `Encoder.SetArraysMultiline`. The behavior should be the same. + +#### `Encoder.Indentation` has been renamed + +The new name is `Encoder.SetIndentSymbol`. The behavior should be the same. + + +#### Embedded structs behave like stdlib + +V1 defaults to merging embedded struct fields into the embedding struct. This +behavior was unexpected because it does not follow the standard library. To +avoid breaking backward compatibility, the `Encoder.PromoteAnonymous` method was +added to make the encoder behave correctly. Given that backward compatibility is not +a problem anymore, v2 does the right thing by default: it follows the behavior +of `encoding/json`. `Encoder.PromoteAnonymous` has been removed. + +[nodoc]: https://github.com/pelletier/go-toml/discussions/506#discussioncomment-1526038 + +### `query` + +go-toml v1 provided the [`go-toml/query`][query] package. It allowed running +JSONPath-style queries on TOML files. This feature is not available in v2. For a +replacement, check out [dasel][dasel].
+ +This package has been removed because it was essentially not supported anymore +(last commit May 2020), increased the complexity of the code base, and more +complete solutions exist out there. + +[query]: https://github.com/pelletier/go-toml/tree/f99d6bbca119636aeafcf351ee52b3d202782627/query +[dasel]: https://github.com/TomWright/dasel + +## License + +The MIT License (MIT). Read [LICENSE](LICENSE). diff --git a/vendor/github.com/pelletier/go-toml/v2/SECURITY.md b/vendor/github.com/pelletier/go-toml/v2/SECURITY.md new file mode 100644 index 00000000..b2f21cfc --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +Use this section to tell people about which versions of your project are +currently being supported with security updates. + +| Version | Supported | +| ---------- | ------------------ | +| Latest 2.x | :white_check_mark: | +| All 1.x | :x: | +| All 0.x | :x: | + +## Reporting a Vulnerability + +Email a vulnerability report to `security@pelletier.codes`. Make sure to include +as many details as possible to reproduce the vulnerability. This is a +side-project: I will try to get back to you as quickly as possible, time +permitting in my personal life. Providing a working patch helps very much! diff --git a/vendor/github.com/pelletier/go-toml/v2/ci.sh b/vendor/github.com/pelletier/go-toml/v2/ci.sh new file mode 100644 index 00000000..d916c5f2 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/ci.sh @@ -0,0 +1,279 @@ +#!/usr/bin/env bash + + +stderr() { + echo "$@" 1>&2 +} + +usage() { + b=$(basename "$0") + echo $b: ERROR: "$@" 1>&2 + + cat 1>&2 <<EOF + +DESCRIPTION + + $(basename "$0") is the script to run continuous integration commands for + go-toml on unix. + + Requires Go and Git to be available in the PATH. Expects to be ran from the + root of go-toml's Git repository. + +USAGE + + $b COMMAND [OPTIONS...] + +COMMANDS + +benchmark [OPTIONS...] [BRANCH] + + Run benchmarks. + + ARGUMENTS + + BRANCH Optional. Defines which Git branch to use when running + benchmarks. + + OPTIONS + + -d Compare benchmarks of HEAD with BRANCH using benchstats. In + this form the BRANCH argument is required. + + -a Compare benchmarks of HEAD against go-toml v1 and + BurntSushi/toml. + + -html When used with -a, emits the output as HTML, ready to be + embedded in the README. + +coverage [OPTIONS...] [BRANCH] + + Generates code coverage. + + ARGUMENTS + + BRANCH Optional. Defines which Git branch to use when reporting + coverage. Defaults to HEAD. + + OPTIONS + + -d Compare coverage of HEAD with the one of BRANCH. In this form, + the BRANCH argument is required. Exit code is non-zero when + coverage percentage decreased. +EOF + exit 1 +} + +cover() { + branch="${1}" + dir="$(mktemp -d)" + + stderr "Executing coverage for ${branch} at ${dir}" + + if [ "${branch}" = "HEAD" ]; then + cp -r . "${dir}/" + else + git worktree add "$dir" "$branch" + fi + + pushd "$dir" + go test -covermode=atomic -coverpkg=./... -coverprofile=coverage.out.tmp ./... 
+ cat coverage.out.tmp | grep -v testsuite | grep -v tomltestgen | grep -v gotoml-test-decoder > coverage.out + go tool cover -func=coverage.out + popd + + if [ "${branch}" != "HEAD" ]; then + git worktree remove --force "$dir" + fi +} + +coverage() { + case "$1" in + -d) + shift + target="${1?Need to provide a target branch argument}" + + output_dir="$(mktemp -d)" + target_out="${output_dir}/target.txt" + head_out="${output_dir}/head.txt" + + cover "${target}" > "${target_out}" + cover "HEAD" > "${head_out}" + + cat "${target_out}" + cat "${head_out}" + + echo "" + + target_pct="$(tail -n2 ${target_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%.*/\1/')" + head_pct="$(tail -n2 ${head_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%/\1/')" + echo "Results: ${target} ${target_pct}% HEAD ${head_pct}%" + + delta_pct=$(echo "$head_pct - $target_pct" | bc -l) + echo "Delta: ${delta_pct}" + + if [[ $delta_pct = \-* ]]; then + echo "Regression!"; + + target_diff="${output_dir}/target.diff.txt" + head_diff="${output_dir}/head.diff.txt" + cat "${target_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${target_diff}" + cat "${head_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${head_diff}" + + diff --side-by-side --suppress-common-lines "${target_diff}" "${head_diff}" + return 1 + fi + return 0 + ;; + esac + + cover "${1-HEAD}" +} + +bench() { + branch="${1}" + out="${2}" + replace="${3}" + dir="$(mktemp -d)" + + stderr "Executing benchmark for ${branch} at ${dir}" + + if [ "${branch}" = "HEAD" ]; then + cp -r . "${dir}/" + else + git worktree add "$dir" "$branch" + fi + + pushd "$dir" + + if [ "${replace}" != "" ]; then + find ./benchmark/ -iname '*.go' -exec sed -i -E "s|github.com/pelletier/go-toml/v2|${replace}|g" {} \; + go get "${replace}" + fi + + export GOMAXPROCS=2 + nice -n -19 taskset --cpu-list 0,1 go test '-bench=^Benchmark(Un)?[mM]arshal' -count=5 -run=Nothing ./... | tee "${out}" + popd + + if [ "${branch}" != "HEAD" ]; then + git worktree remove --force "$dir" + fi +} + +fmktemp() { + if mktemp --version|grep GNU >/dev/null; then + mktemp --suffix=-$1; + else + mktemp -t $1; + fi +} + +benchstathtml() { +python3 - $1 <<'EOF' +import sys + +lines = [] +stop = False + +with open(sys.argv[1]) as f: + for line in f.readlines(): + line = line.strip() + if line == "": + stop = True + if not stop: + lines.append(line.split(',')) + +results = [] +for line in reversed(lines[1:]): + v2 = float(line[1]) + results.append([ + line[0].replace("-32", ""), + "%.1fx" % (float(line[3])/v2), # v1 + "%.1fx" % (float(line[5])/v2), # bs + ]) +# move geomean to the end +results.append(results[0]) +del results[0] + + +def printtable(data): + print(""" +<table> + <thead> + <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr> + </thead> + <tbody>""") + + for r in data: + print(" <tr><td>{}</td><td>{}</td><td>{}</td></tr>".format(*r)) + + print(""" </tbody> +</table>""") + + +def match(x): + return "ReferenceFile" in x[0] or "HugoFrontMatter" in x[0] + +above = [x for x in results if match(x)] +below = [x for x in results if not match(x)] + +printtable(above) +print("<details><summary>See more</summary>") +print("""<p>The table above has the results of the most common use-cases. The table below +contains the results of all benchmarks, including unrealistic ones. 
It is +provided for completeness.</p>""") +printtable(below) +print('<p>This table can be generated with <code>./ci.sh benchmark -a -html</code>.</p>') +print("</details>") + +EOF +} + +benchmark() { + case "$1" in + -d) + shift + target="${1?Need to provide a target branch argument}" + + old=`fmktemp ${target}` + bench "${target}" "${old}" + + new=`fmktemp HEAD` + bench HEAD "${new}" + + benchstat "${old}" "${new}" + return 0 + ;; + -a) + shift + + v2stats=`fmktemp go-toml-v2` + bench HEAD "${v2stats}" "github.com/pelletier/go-toml/v2" + v1stats=`fmktemp go-toml-v1` + bench HEAD "${v1stats}" "github.com/pelletier/go-toml" + bsstats=`fmktemp bs-toml` + bench HEAD "${bsstats}" "github.com/BurntSushi/toml" + + cp "${v2stats}" go-toml-v2.txt + cp "${v1stats}" go-toml-v1.txt + cp "${bsstats}" bs-toml.txt + + if [ "$1" = "-html" ]; then + tmpcsv=`fmktemp csv` + benchstat -csv -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv + benchstathtml $tmpcsv + else + benchstat -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt + fi + + rm -f go-toml-v2.txt go-toml-v1.txt bs-toml.txt + return $? + esac + + bench "${1-HEAD}" `mktemp` +} + +case "$1" in + coverage) shift; coverage $@;; + benchmark) shift; benchmark $@;; + *) usage "bad argument $1";; +esac diff --git a/vendor/github.com/pelletier/go-toml/v2/decode.go b/vendor/github.com/pelletier/go-toml/v2/decode.go new file mode 100644 index 00000000..4af96536 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/decode.go @@ -0,0 +1,544 @@ +package toml + +import ( + "fmt" + "math" + "strconv" + "time" +) + +func parseInteger(b []byte) (int64, error) { + if len(b) > 2 && b[0] == '0' { + switch b[1] { + case 'x': + return parseIntHex(b) + case 'b': + return parseIntBin(b) + case 'o': + return parseIntOct(b) + default: + panic(fmt.Errorf("invalid base '%c', should have been checked by scanIntOrFloat", b[1])) + } + } + + return parseIntDec(b) +} + +func parseLocalDate(b []byte) (LocalDate, error) { + // full-date = date-fullyear "-" date-month "-" date-mday + // date-fullyear = 4DIGIT + // date-month = 2DIGIT ; 01-12 + // date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year + var date LocalDate + + if len(b) != 10 || b[4] != '-' || b[7] != '-' { + return date, newDecodeError(b, "dates are expected to have the format YYYY-MM-DD") + } + + var err error + + date.Year, err = parseDecimalDigits(b[0:4]) + if err != nil { + return LocalDate{}, err + } + + date.Month, err = parseDecimalDigits(b[5:7]) + if err != nil { + return LocalDate{}, err + } + + date.Day, err = parseDecimalDigits(b[8:10]) + if err != nil { + return LocalDate{}, err + } + + if !isValidDate(date.Year, date.Month, date.Day) { + return LocalDate{}, newDecodeError(b, "impossible date") + } + + return date, nil +} + +func parseDecimalDigits(b []byte) (int, error) { + v := 0 + + for i, c := range b { + if c < '0' || c > '9' { + return 0, newDecodeError(b[i:i+1], "expected digit (0-9)") + } + v *= 10 + v += int(c - '0') + } + + return v, nil +} + +func parseDateTime(b []byte) (time.Time, error) { + // offset-date-time = full-date time-delim full-time + // full-time = partial-time time-offset + // time-offset = "Z" / time-numoffset + // time-numoffset = ( "+" / "-" ) time-hour ":" time-minute + + dt, b, err := parseLocalDateTime(b) + if err != nil { + return time.Time{}, err + } + + var zone *time.Location + + if len(b) == 0 { + // parser should have checked that when assigning the date time node + panic("date time should have a timezone") + } + + if b[0] == 'Z' || b[0] 
== 'z' { + b = b[1:] + zone = time.UTC + } else { + const dateTimeByteLen = 6 + if len(b) != dateTimeByteLen { + return time.Time{}, newDecodeError(b, "invalid date-time timezone") + } + var direction int + switch b[0] { + case '-': + direction = -1 + case '+': + direction = +1 + default: + return time.Time{}, newDecodeError(b[:1], "invalid timezone offset character") + } + + if b[3] != ':' { + return time.Time{}, newDecodeError(b[3:4], "expected a : separator") + } + + hours, err := parseDecimalDigits(b[1:3]) + if err != nil { + return time.Time{}, err + } + if hours > 23 { + return time.Time{}, newDecodeError(b[:1], "invalid timezone offset hours") + } + + minutes, err := parseDecimalDigits(b[4:6]) + if err != nil { + return time.Time{}, err + } + if minutes > 59 { + return time.Time{}, newDecodeError(b[:1], "invalid timezone offset minutes") + } + + seconds := direction * (hours*3600 + minutes*60) + if seconds == 0 { + zone = time.UTC + } else { + zone = time.FixedZone("", seconds) + } + b = b[dateTimeByteLen:] + } + + if len(b) > 0 { + return time.Time{}, newDecodeError(b, "extra bytes at the end of the timezone") + } + + t := time.Date( + dt.Year, + time.Month(dt.Month), + dt.Day, + dt.Hour, + dt.Minute, + dt.Second, + dt.Nanosecond, + zone) + + return t, nil +} + +func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) { + var dt LocalDateTime + + const localDateTimeByteMinLen = 11 + if len(b) < localDateTimeByteMinLen { + return dt, nil, newDecodeError(b, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]") + } + + date, err := parseLocalDate(b[:10]) + if err != nil { + return dt, nil, err + } + dt.LocalDate = date + + sep := b[10] + if sep != 'T' && sep != ' ' && sep != 't' { + return dt, nil, newDecodeError(b[10:11], "datetime separator is expected to be T or a space") + } + + t, rest, err := parseLocalTime(b[11:]) + if err != nil { + return dt, nil, err + } + dt.LocalTime = t + + return dt, rest, nil +} + +// parseLocalTime is a bit different because it also returns the remaining +// []byte that is didn't need. This is to allow parseDateTime to parse those +// remaining bytes as a timezone. +func parseLocalTime(b []byte) (LocalTime, []byte, error) { + var ( + nspow = [10]int{0, 1e8, 1e7, 1e6, 1e5, 1e4, 1e3, 1e2, 1e1, 1e0} + t LocalTime + ) + + // check if b matches to have expected format HH:MM:SS[.NNNNNN] + const localTimeByteLen = 8 + if len(b) < localTimeByteLen { + return t, nil, newDecodeError(b, "times are expected to have the format HH:MM:SS[.NNNNNN]") + } + + var err error + + t.Hour, err = parseDecimalDigits(b[0:2]) + if err != nil { + return t, nil, err + } + + if t.Hour > 23 { + return t, nil, newDecodeError(b[0:2], "hour cannot be greater 23") + } + if b[2] != ':' { + return t, nil, newDecodeError(b[2:3], "expecting colon between hours and minutes") + } + + t.Minute, err = parseDecimalDigits(b[3:5]) + if err != nil { + return t, nil, err + } + if t.Minute > 59 { + return t, nil, newDecodeError(b[3:5], "minutes cannot be greater 59") + } + if b[5] != ':' { + return t, nil, newDecodeError(b[5:6], "expecting colon between minutes and seconds") + } + + t.Second, err = parseDecimalDigits(b[6:8]) + if err != nil { + return t, nil, err + } + + if t.Second > 60 { + return t, nil, newDecodeError(b[6:8], "seconds cannot be greater 60") + } + + b = b[8:] + + if len(b) >= 1 && b[0] == '.' 
{ + frac := 0 + precision := 0 + digits := 0 + + for i, c := range b[1:] { + if !isDigit(c) { + if i == 0 { + return t, nil, newDecodeError(b[0:1], "need at least one digit after fraction point") + } + break + } + digits++ + + const maxFracPrecision = 9 + if i >= maxFracPrecision { + // go-toml allows decoding fractional seconds + // beyond the supported precision of 9 + // digits. It truncates the fractional component + // to the supported precision and ignores the + // remaining digits. + // + // https://github.com/pelletier/go-toml/discussions/707 + continue + } + + frac *= 10 + frac += int(c - '0') + precision++ + } + + if precision == 0 { + return t, nil, newDecodeError(b[:1], "nanoseconds need at least one digit") + } + + t.Nanosecond = frac * nspow[precision] + t.Precision = precision + + return t, b[1+digits:], nil + } + return t, b, nil +} + +//nolint:cyclop +func parseFloat(b []byte) (float64, error) { + if len(b) == 4 && (b[0] == '+' || b[0] == '-') && b[1] == 'n' && b[2] == 'a' && b[3] == 'n' { + return math.NaN(), nil + } + + cleaned, err := checkAndRemoveUnderscoresFloats(b) + if err != nil { + return 0, err + } + + if cleaned[0] == '.' { + return 0, newDecodeError(b, "float cannot start with a dot") + } + + if cleaned[len(cleaned)-1] == '.' { + return 0, newDecodeError(b, "float cannot end with a dot") + } + + dotAlreadySeen := false + for i, c := range cleaned { + if c == '.' { + if dotAlreadySeen { + return 0, newDecodeError(b[i:i+1], "float can have at most one decimal point") + } + if !isDigit(cleaned[i-1]) { + return 0, newDecodeError(b[i-1:i+1], "float decimal point must be preceded by a digit") + } + if !isDigit(cleaned[i+1]) { + return 0, newDecodeError(b[i:i+2], "float decimal point must be followed by a digit") + } + dotAlreadySeen = true + } + } + + start := 0 + if cleaned[0] == '+' || cleaned[0] == '-' { + start = 1 + } + if cleaned[start] == '0' && isDigit(cleaned[start+1]) { + return 0, newDecodeError(b, "float integer part cannot have leading zeroes") + } + + f, err := strconv.ParseFloat(string(cleaned), 64) + if err != nil { + return 0, newDecodeError(b, "unable to parse float: %w", err) + } + + return f, nil +} + +func parseIntHex(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) + if err != nil { + return 0, err + } + + i, err := strconv.ParseInt(string(cleaned), 16, 64) + if err != nil { + return 0, newDecodeError(b, "couldn't parse hexadecimal number: %w", err) + } + + return i, nil +} + +func parseIntOct(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) + if err != nil { + return 0, err + } + + i, err := strconv.ParseInt(string(cleaned), 8, 64) + if err != nil { + return 0, newDecodeError(b, "couldn't parse octal number: %w", err) + } + + return i, nil +} + +func parseIntBin(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) + if err != nil { + return 0, err + } + + i, err := strconv.ParseInt(string(cleaned), 2, 64) + if err != nil { + return 0, newDecodeError(b, "couldn't parse binary number: %w", err) + } + + return i, nil +} + +func isSign(b byte) bool { + return b == '+' || b == '-' +} + +func parseIntDec(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b) + if err != nil { + return 0, err + } + + startIdx := 0 + + if isSign(cleaned[0]) { + startIdx++ + } + + if len(cleaned) > startIdx+1 && cleaned[startIdx] == '0' { + return 0, newDecodeError(b, "leading zero not allowed on decimal number") + } + + i, err := 
strconv.ParseInt(string(cleaned), 10, 64) + if err != nil { + return 0, newDecodeError(b, "couldn't parse decimal number: %w", err) + } + + return i, nil +} + +func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) { + start := 0 + if b[start] == '+' || b[start] == '-' { + start++ + } + + if len(b) == start { + return b, nil + } + + if b[start] == '_' { + return nil, newDecodeError(b[start:start+1], "number cannot start with underscore") + } + + if b[len(b)-1] == '_' { + return nil, newDecodeError(b[len(b)-1:], "number cannot end with underscore") + } + + // fast path + i := 0 + for ; i < len(b); i++ { + if b[i] == '_' { + break + } + } + if i == len(b) { + return b, nil + } + + before := false + cleaned := make([]byte, i, len(b)) + copy(cleaned, b) + + for i++; i < len(b); i++ { + c := b[i] + if c == '_' { + if !before { + return nil, newDecodeError(b[i-1:i+1], "number must have at least one digit between underscores") + } + before = false + } else { + before = true + cleaned = append(cleaned, c) + } + } + + return cleaned, nil +} + +func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) { + if b[0] == '_' { + return nil, newDecodeError(b[0:1], "number cannot start with underscore") + } + + if b[len(b)-1] == '_' { + return nil, newDecodeError(b[len(b)-1:], "number cannot end with underscore") + } + + // fast path + i := 0 + for ; i < len(b); i++ { + if b[i] == '_' { + break + } + } + if i == len(b) { + return b, nil + } + + before := false + cleaned := make([]byte, 0, len(b)) + + for i := 0; i < len(b); i++ { + c := b[i] + + switch c { + case '_': + if !before { + return nil, newDecodeError(b[i-1:i+1], "number must have at least one digit between underscores") + } + if i < len(b)-1 && (b[i+1] == 'e' || b[i+1] == 'E') { + return nil, newDecodeError(b[i+1:i+2], "cannot have underscore before exponent") + } + before = false + case '+', '-': + // signed exponents + cleaned = append(cleaned, c) + before = false + case 'e', 'E': + if i < len(b)-1 && b[i+1] == '_' { + return nil, newDecodeError(b[i+1:i+2], "cannot have underscore after exponent") + } + cleaned = append(cleaned, c) + case '.': + if i < len(b)-1 && b[i+1] == '_' { + return nil, newDecodeError(b[i+1:i+2], "cannot have underscore after decimal point") + } + if i > 0 && b[i-1] == '_' { + return nil, newDecodeError(b[i-1:i], "cannot have underscore before decimal point") + } + cleaned = append(cleaned, c) + default: + before = true + cleaned = append(cleaned, c) + } + } + + return cleaned, nil +} + +// isValidDate checks if a provided date is a date that exists. +func isValidDate(year int, month int, day int) bool { + return month > 0 && month < 13 && day > 0 && day <= daysIn(month, year) +} + +// daysBefore[m] counts the number of days in a non-leap year +// before month m begins. There is an entry for m=12, counting +// the number of days before January of next year (365). 
+var daysBefore = [...]int32{ + 0, + 31, + 31 + 28, + 31 + 28 + 31, + 31 + 28 + 31 + 30, + 31 + 28 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31, +} + +func daysIn(m int, year int) int { + if m == 2 && isLeap(year) { + return 29 + } + return int(daysBefore[m] - daysBefore[m-1]) +} + +func isLeap(year int) bool { + return year%4 == 0 && (year%100 != 0 || year%400 == 0) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/doc.go b/vendor/github.com/pelletier/go-toml/v2/doc.go new file mode 100644 index 00000000..b7bc599b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/doc.go @@ -0,0 +1,2 @@ +// Package toml is a library to read and write TOML documents. +package toml diff --git a/vendor/github.com/pelletier/go-toml/v2/errors.go b/vendor/github.com/pelletier/go-toml/v2/errors.go new file mode 100644 index 00000000..5e6635c3 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/errors.go @@ -0,0 +1,269 @@ +package toml + +import ( + "fmt" + "strconv" + "strings" + + "github.com/pelletier/go-toml/v2/internal/danger" +) + +// DecodeError represents an error encountered during the parsing or decoding +// of a TOML document. +// +// In addition to the error message, it contains the position in the document +// where it happened, as well as a human-readable representation that shows +// where the error occurred in the document. +type DecodeError struct { + message string + line int + column int + key Key + + human string +} + +// StrictMissingError occurs in a TOML document that does not have a +// corresponding field in the target value. It contains all the missing fields +// in Errors. +// +// Emitted by Decoder when DisallowUnknownFields() was called. +type StrictMissingError struct { + // One error per field that could not be found. + Errors []DecodeError +} + +// Error returns the canonical string for this error. +func (s *StrictMissingError) Error() string { + return "strict mode: fields in the document are missing in the target struct" +} + +// String returns a human readable description of all errors. +func (s *StrictMissingError) String() string { + var buf strings.Builder + + for i, e := range s.Errors { + if i > 0 { + buf.WriteString("\n---\n") + } + + buf.WriteString(e.String()) + } + + return buf.String() +} + +type Key []string + +// internal version of DecodeError that is used as the base to create a +// DecodeError with full context. +type decodeError struct { + highlight []byte + message string + key Key // optional +} + +func (de *decodeError) Error() string { + return de.message +} + +func newDecodeError(highlight []byte, format string, args ...interface{}) error { + return &decodeError{ + highlight: highlight, + message: fmt.Errorf(format, args...).Error(), + } +} + +// Error returns the error message contained in the DecodeError. +func (e *DecodeError) Error() string { + return "toml: " + e.message +} + +// String returns the human-readable contextualized error. This string is multi-line. +func (e *DecodeError) String() string { + return e.human +} + +// Position returns the (line, column) pair indicating where the error +// occurred in the document. Positions are 1-indexed. 
+func (e *DecodeError) Position() (row int, column int) { + return e.line, e.column +} + +// Key that was being processed when the error occurred. The key is present only +// if this DecodeError is part of a StrictMissingError. +func (e *DecodeError) Key() Key { + return e.key +} + +// decodeErrorFromHighlight creates a DecodeError referencing a highlighted +// range of bytes from document. +// +// highlight needs to be a sub-slice of document, or this function panics. +// +// The function copies all bytes used in DecodeError, so that document and +// highlight can be freely deallocated. +//nolint:funlen +func wrapDecodeError(document []byte, de *decodeError) *DecodeError { + offset := danger.SubsliceOffset(document, de.highlight) + + errMessage := de.Error() + errLine, errColumn := positionAtEnd(document[:offset]) + before, after := linesOfContext(document, de.highlight, offset, 3) + + var buf strings.Builder + + maxLine := errLine + len(after) - 1 + lineColumnWidth := len(strconv.Itoa(maxLine)) + + // Write the lines of context strictly before the error. + for i := len(before) - 1; i > 0; i-- { + line := errLine - i + buf.WriteString(formatLineNumber(line, lineColumnWidth)) + buf.WriteString("|") + + if len(before[i]) > 0 { + buf.WriteString(" ") + buf.Write(before[i]) + } + + buf.WriteRune('\n') + } + + // Write the document line that contains the error. + + buf.WriteString(formatLineNumber(errLine, lineColumnWidth)) + buf.WriteString("| ") + + if len(before) > 0 { + buf.Write(before[0]) + } + + buf.Write(de.highlight) + + if len(after) > 0 { + buf.Write(after[0]) + } + + buf.WriteRune('\n') + + // Write the line with the error message itself (so it does not have a line + // number). + + buf.WriteString(strings.Repeat(" ", lineColumnWidth)) + buf.WriteString("| ") + + if len(before) > 0 { + buf.WriteString(strings.Repeat(" ", len(before[0]))) + } + + buf.WriteString(strings.Repeat("~", len(de.highlight))) + + if len(errMessage) > 0 { + buf.WriteString(" ") + buf.WriteString(errMessage) + } + + // Write the lines of context strictly after the error. + + for i := 1; i < len(after); i++ { + buf.WriteRune('\n') + line := errLine + i + buf.WriteString(formatLineNumber(line, lineColumnWidth)) + buf.WriteString("|") + + if len(after[i]) > 0 { + buf.WriteString(" ") + buf.Write(after[i]) + } + } + + return &DecodeError{ + message: errMessage, + line: errLine, + column: errColumn, + key: de.key, + human: buf.String(), + } +} + +func formatLineNumber(line int, width int) string { + format := "%" + strconv.Itoa(width) + "d" + + return fmt.Sprintf(format, line) +} + +func linesOfContext(document []byte, highlight []byte, offset int, linesAround int) ([][]byte, [][]byte) { + return beforeLines(document, offset, linesAround), afterLines(document, highlight, offset, linesAround) +} + +func beforeLines(document []byte, offset int, linesAround int) [][]byte { + var beforeLines [][]byte + + // Walk the document backward from the highlight to find previous lines + // of context. 
+ rest := document[:offset] +backward: + for o := len(rest) - 1; o >= 0 && len(beforeLines) <= linesAround && len(rest) > 0; { + switch { + case rest[o] == '\n': + // handle individual lines + beforeLines = append(beforeLines, rest[o+1:]) + rest = rest[:o] + o = len(rest) - 1 + case o == 0: + // add the first line only if it's non-empty + beforeLines = append(beforeLines, rest) + + break backward + default: + o-- + } + } + + return beforeLines +} + +func afterLines(document []byte, highlight []byte, offset int, linesAround int) [][]byte { + var afterLines [][]byte + + // Walk the document forward from the highlight to find the following + // lines of context. + rest := document[offset+len(highlight):] +forward: + for o := 0; o < len(rest) && len(afterLines) <= linesAround; { + switch { + case rest[o] == '\n': + // handle individual lines + afterLines = append(afterLines, rest[:o]) + rest = rest[o+1:] + o = 0 + + case o == len(rest)-1: + // add last line only if it's non-empty + afterLines = append(afterLines, rest) + + break forward + default: + o++ + } + } + + return afterLines +} + +func positionAtEnd(b []byte) (row int, column int) { + row = 1 + column = 1 + + for _, c := range b { + if c == '\n' { + row++ + column = 1 + } else { + column++ + } + } + + return +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go b/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go new file mode 100644 index 00000000..33c7f915 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go @@ -0,0 +1,144 @@ +package ast + +import ( + "fmt" + "unsafe" + + "github.com/pelletier/go-toml/v2/internal/danger" +) + +// Iterator starts uninitialized, you need to call Next() first. +// +// For example: +// +// it := n.Children() +// for it.Next() { +// it.Node() +// } +type Iterator struct { + started bool + node *Node +} + +// Next moves the iterator forward and returns true if points to a +// node, false otherwise. +func (c *Iterator) Next() bool { + if !c.started { + c.started = true + } else if c.node.Valid() { + c.node = c.node.Next() + } + return c.node.Valid() +} + +// IsLast returns true if the current node of the iterator is the last +// one. Subsequent call to Next() will return false. +func (c *Iterator) IsLast() bool { + return c.node.next == 0 +} + +// Node returns a copy of the node pointed at by the iterator. +func (c *Iterator) Node() *Node { + return c.node +} + +// Root contains a full AST. +// +// It is immutable once constructed with Builder. +type Root struct { + nodes []Node +} + +// Iterator over the top level nodes. +func (r *Root) Iterator() Iterator { + it := Iterator{} + if len(r.nodes) > 0 { + it.node = &r.nodes[0] + } + return it +} + +func (r *Root) at(idx Reference) *Node { + return &r.nodes[idx] +} + +// Arrays have one child per element in the array. InlineTables have +// one child per key-value pair in the table. KeyValues have at least +// two children. The first one is the value. The rest make a +// potentially dotted key. Table and Array table have one child per +// element of the key they represent (same as KeyValue, but without +// the last node being the value). +type Node struct { + Kind Kind + Raw Range // Raw bytes from the input. + Data []byte // Node value (either allocated or referencing the input). + + // References to other nodes, as offsets in the backing array + // from this node. References can go backward, so those can be + // negative. 
+ next int // 0 if last element + child int // 0 if no child +} + +type Range struct { + Offset uint32 + Length uint32 +} + +// Next returns a copy of the next node, or an invalid Node if there +// is no next node. +func (n *Node) Next() *Node { + if n.next == 0 { + return nil + } + ptr := unsafe.Pointer(n) + size := unsafe.Sizeof(Node{}) + return (*Node)(danger.Stride(ptr, size, n.next)) +} + +// Child returns a copy of the first child node of this node. Other +// children can be accessed calling Next on the first child. Returns +// an invalid Node if there is none. +func (n *Node) Child() *Node { + if n.child == 0 { + return nil + } + ptr := unsafe.Pointer(n) + size := unsafe.Sizeof(Node{}) + return (*Node)(danger.Stride(ptr, size, n.child)) +} + +// Valid returns true if the node's kind is set (not to Invalid). +func (n *Node) Valid() bool { + return n != nil +} + +// Key returns the child nodes making the Key on a supported +// node. Panics otherwise. They are guaranteed to be all be of the +// Kind Key. A simple key would return just one element. +func (n *Node) Key() Iterator { + switch n.Kind { + case KeyValue: + value := n.Child() + if !value.Valid() { + panic(fmt.Errorf("KeyValue should have at least two children")) + } + return Iterator{node: value.Next()} + case Table, ArrayTable: + return Iterator{node: n.Child()} + default: + panic(fmt.Errorf("Key() is not supported on a %s", n.Kind)) + } +} + +// Value returns a pointer to the value node of a KeyValue. +// Guaranteed to be non-nil. Panics if not called on a KeyValue node, +// or if the Children are malformed. +func (n *Node) Value() *Node { + return n.Child() +} + +// Children returns an iterator over a node's children. +func (n *Node) Children() Iterator { + return Iterator{node: n.Child()} +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go b/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go new file mode 100644 index 00000000..120f16e5 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go @@ -0,0 +1,51 @@ +package ast + +type Reference int + +const InvalidReference Reference = -1 + +func (r Reference) Valid() bool { + return r != InvalidReference +} + +type Builder struct { + tree Root + lastIdx int +} + +func (b *Builder) Tree() *Root { + return &b.tree +} + +func (b *Builder) NodeAt(ref Reference) *Node { + return b.tree.at(ref) +} + +func (b *Builder) Reset() { + b.tree.nodes = b.tree.nodes[:0] + b.lastIdx = 0 +} + +func (b *Builder) Push(n Node) Reference { + b.lastIdx = len(b.tree.nodes) + b.tree.nodes = append(b.tree.nodes, n) + return Reference(b.lastIdx) +} + +func (b *Builder) PushAndChain(n Node) Reference { + newIdx := len(b.tree.nodes) + b.tree.nodes = append(b.tree.nodes, n) + if b.lastIdx >= 0 { + b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx + } + b.lastIdx = newIdx + return Reference(b.lastIdx) +} + +func (b *Builder) AttachChild(parent Reference, child Reference) { + b.tree.nodes[parent].child = int(child) - int(parent) +} + +func (b *Builder) Chain(from Reference, to Reference) { + b.tree.nodes[from].next = int(to) - int(from) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/ast/kind.go b/vendor/github.com/pelletier/go-toml/v2/internal/ast/kind.go new file mode 100644 index 00000000..2b50c67f --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/ast/kind.go @@ -0,0 +1,69 @@ +package ast + +import "fmt" + +type Kind int + +const ( + // meta + Invalid Kind = iota + Comment + Key + + // top level structures 
+ Table + ArrayTable + KeyValue + + // containers values + Array + InlineTable + + // values + String + Bool + Float + Integer + LocalDate + LocalTime + LocalDateTime + DateTime +) + +func (k Kind) String() string { + switch k { + case Invalid: + return "Invalid" + case Comment: + return "Comment" + case Key: + return "Key" + case Table: + return "Table" + case ArrayTable: + return "ArrayTable" + case KeyValue: + return "KeyValue" + case Array: + return "Array" + case InlineTable: + return "InlineTable" + case String: + return "String" + case Bool: + return "Bool" + case Float: + return "Float" + case Integer: + return "Integer" + case LocalDate: + return "LocalDate" + case LocalTime: + return "LocalTime" + case LocalDateTime: + return "LocalDateTime" + case DateTime: + return "DateTime" + } + panic(fmt.Errorf("Kind.String() not implemented for '%d'", k)) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go b/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go new file mode 100644 index 00000000..e38e1131 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go @@ -0,0 +1,65 @@ +package danger + +import ( + "fmt" + "reflect" + "unsafe" +) + +const maxInt = uintptr(int(^uint(0) >> 1)) + +func SubsliceOffset(data []byte, subslice []byte) int { + datap := (*reflect.SliceHeader)(unsafe.Pointer(&data)) + hlp := (*reflect.SliceHeader)(unsafe.Pointer(&subslice)) + + if hlp.Data < datap.Data { + panic(fmt.Errorf("subslice address (%d) is before data address (%d)", hlp.Data, datap.Data)) + } + offset := hlp.Data - datap.Data + + if offset > maxInt { + panic(fmt.Errorf("slice offset larger than int (%d)", offset)) + } + + intoffset := int(offset) + + if intoffset > datap.Len { + panic(fmt.Errorf("slice offset (%d) is farther than data length (%d)", intoffset, datap.Len)) + } + + if intoffset+hlp.Len > datap.Len { + panic(fmt.Errorf("slice ends (%d+%d) is farther than data length (%d)", intoffset, hlp.Len, datap.Len)) + } + + return intoffset +} + +func BytesRange(start []byte, end []byte) []byte { + if start == nil || end == nil { + panic("cannot call BytesRange with nil") + } + startp := (*reflect.SliceHeader)(unsafe.Pointer(&start)) + endp := (*reflect.SliceHeader)(unsafe.Pointer(&end)) + + if startp.Data > endp.Data { + panic(fmt.Errorf("start pointer address (%d) is after end pointer address (%d)", startp.Data, endp.Data)) + } + + l := startp.Len + endLen := int(endp.Data-startp.Data) + endp.Len + if endLen > l { + l = endLen + } + + if l > startp.Cap { + panic(fmt.Errorf("range length is larger than capacity")) + } + + return start[:l] +} + +func Stride(ptr unsafe.Pointer, size uintptr, offset int) unsafe.Pointer { + // TODO: replace with unsafe.Add when Go 1.17 is released + // https://github.com/golang/go/issues/40481 + return unsafe.Pointer(uintptr(ptr) + uintptr(int(size)*offset)) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go b/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go new file mode 100644 index 00000000..9d41c28a --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go @@ -0,0 +1,23 @@ +package danger + +import ( + "reflect" + "unsafe" +) + +// typeID is used as key in encoder and decoder caches to enable using +// the optimize runtime.mapaccess2_fast64 function instead of the more +// expensive lookup if we were to use reflect.Type as map key. 
+// +// typeID holds the pointer to the reflect.Type value, which is unique +// in the program. +// +// https://github.com/segmentio/encoding/blob/master/json/codec.go#L59-L61 +type TypeID unsafe.Pointer + +func MakeTypeID(t reflect.Type) TypeID { + // reflect.Type has the fields: + // typ unsafe.Pointer + // ptr unsafe.Pointer + return TypeID((*[2]unsafe.Pointer)(unsafe.Pointer(&t))[1]) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go new file mode 100644 index 00000000..7c148f48 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go @@ -0,0 +1,50 @@ +package tracker + +import ( + "github.com/pelletier/go-toml/v2/internal/ast" +) + +// KeyTracker is a tracker that keeps track of the current Key as the AST is +// walked. +type KeyTracker struct { + k []string +} + +// UpdateTable sets the state of the tracker with the AST table node. +func (t *KeyTracker) UpdateTable(node *ast.Node) { + t.reset() + t.Push(node) +} + +// UpdateArrayTable sets the state of the tracker with the AST array table node. +func (t *KeyTracker) UpdateArrayTable(node *ast.Node) { + t.reset() + t.Push(node) +} + +// Push the given key on the stack. +func (t *KeyTracker) Push(node *ast.Node) { + it := node.Key() + for it.Next() { + t.k = append(t.k, string(it.Node().Data)) + } +} + +// Pop key from stack. +func (t *KeyTracker) Pop(node *ast.Node) { + it := node.Key() + for it.Next() { + t.k = t.k[:len(t.k)-1] + } +} + +// Key returns the current key +func (t *KeyTracker) Key() []string { + k := make([]string, len(t.k)) + copy(k, t.k) + return k +} + +func (t *KeyTracker) reset() { + t.k = t.k[:0] +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go new file mode 100644 index 00000000..a7ee05ba --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go @@ -0,0 +1,356 @@ +package tracker + +import ( + "bytes" + "fmt" + "sync" + + "github.com/pelletier/go-toml/v2/internal/ast" +) + +type keyKind uint8 + +const ( + invalidKind keyKind = iota + valueKind + tableKind + arrayTableKind +) + +func (k keyKind) String() string { + switch k { + case invalidKind: + return "invalid" + case valueKind: + return "value" + case tableKind: + return "table" + case arrayTableKind: + return "array table" + } + panic("missing keyKind string mapping") +} + +// SeenTracker tracks which keys have been seen with which TOML type to flag +// duplicates and mismatches according to the spec. +// +// Each node in the visited tree is represented by an entry. Each entry has an +// identifier, which is provided by a counter. Entries are stored in the array +// entries. As new nodes are discovered (referenced for the first time in the +// TOML document), entries are created and appended to the array. An entry +// points to its parent using its id. +// +// To find whether a given key (sequence of []byte) has already been visited, +// the entries are linearly searched, looking for one with the right name and +// parent id. +// +// Given that all keys appear in the document after their parent, it is +// guaranteed that all descendants of a node are stored after the node, this +// speeds up the search process. +// +// When encountering [[array tables]], the descendants of that node are removed +// to allow that branch of the tree to be "rediscovered". 
To maintain the +// invariant above, the deletion process needs to keep the order of entries. +// This results in more copies in that case. +type SeenTracker struct { + entries []entry + currentIdx int +} + +var pool sync.Pool + +func (s *SeenTracker) reset() { + // Always contains a root element at index 0. + s.currentIdx = 0 + if len(s.entries) == 0 { + s.entries = make([]entry, 1, 2) + } else { + s.entries = s.entries[:1] + } + s.entries[0].child = -1 + s.entries[0].next = -1 +} + +type entry struct { + // Use -1 to indicate no child or no sibling. + child int + next int + + name []byte + kind keyKind + explicit bool + kv bool +} + +// Find the index of the child of parentIdx with key k. Returns -1 if +// it does not exist. +func (s *SeenTracker) find(parentIdx int, k []byte) int { + for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next { + if bytes.Equal(s.entries[i].name, k) { + return i + } + } + return -1 +} + +// Remove all descendants of node at position idx. +func (s *SeenTracker) clear(idx int) { + if idx >= len(s.entries) { + return + } + + for i := s.entries[idx].child; i >= 0; { + next := s.entries[i].next + n := s.entries[0].next + s.entries[0].next = i + s.entries[i].next = n + s.entries[i].name = nil + s.clear(i) + i = next + } + + s.entries[idx].child = -1 +} + +func (s *SeenTracker) create(parentIdx int, name []byte, kind keyKind, explicit bool, kv bool) int { + e := entry{ + child: -1, + next: s.entries[parentIdx].child, + + name: name, + kind: kind, + explicit: explicit, + kv: kv, + } + var idx int + if s.entries[0].next >= 0 { + idx = s.entries[0].next + s.entries[0].next = s.entries[idx].next + s.entries[idx] = e + } else { + idx = len(s.entries) + s.entries = append(s.entries, e) + } + + s.entries[parentIdx].child = idx + + return idx +} + +func (s *SeenTracker) setExplicitFlag(parentIdx int) { + for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next { + if s.entries[i].kv { + s.entries[i].explicit = true + s.entries[i].kv = false + } + s.setExplicitFlag(i) + } +} + +// CheckExpression takes a top-level node and checks that it does not contain +// keys that have been seen in previous calls, and validates that types are +// consistent. +func (s *SeenTracker) CheckExpression(node *ast.Node) error { + if s.entries == nil { + s.reset() + } + switch node.Kind { + case ast.KeyValue: + return s.checkKeyValue(node) + case ast.Table: + return s.checkTable(node) + case ast.ArrayTable: + return s.checkArrayTable(node) + default: + panic(fmt.Errorf("this should not be a top level node type: %s", node.Kind)) + } +} + +func (s *SeenTracker) checkTable(node *ast.Node) error { + if s.currentIdx >= 0 { + s.setExplicitFlag(s.currentIdx) + } + + it := node.Key() + + parentIdx := 0 + + // This code is duplicated in checkArrayTable. This is because factoring + // it in a function requires to copy the iterator, or allocate it to the + // heap, which is not cheap. 
+ for it.Next() { + if it.IsLast() { + break + } + + k := it.Node().Data + + idx := s.find(parentIdx, k) + + if idx < 0 { + idx = s.create(parentIdx, k, tableKind, false, false) + } else { + entry := s.entries[idx] + if entry.kind == valueKind { + return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + } + } + parentIdx = idx + } + + k := it.Node().Data + idx := s.find(parentIdx, k) + + if idx >= 0 { + kind := s.entries[idx].kind + if kind != tableKind { + return fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind) + } + if s.entries[idx].explicit { + return fmt.Errorf("toml: table %s already exists", string(k)) + } + s.entries[idx].explicit = true + } else { + idx = s.create(parentIdx, k, tableKind, true, false) + } + + s.currentIdx = idx + + return nil +} + +func (s *SeenTracker) checkArrayTable(node *ast.Node) error { + if s.currentIdx >= 0 { + s.setExplicitFlag(s.currentIdx) + } + + it := node.Key() + + parentIdx := 0 + + for it.Next() { + if it.IsLast() { + break + } + + k := it.Node().Data + + idx := s.find(parentIdx, k) + + if idx < 0 { + idx = s.create(parentIdx, k, tableKind, false, false) + } else { + entry := s.entries[idx] + if entry.kind == valueKind { + return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + } + } + + parentIdx = idx + } + + k := it.Node().Data + idx := s.find(parentIdx, k) + + if idx >= 0 { + kind := s.entries[idx].kind + if kind != arrayTableKind { + return fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", kind, string(k)) + } + s.clear(idx) + } else { + idx = s.create(parentIdx, k, arrayTableKind, true, false) + } + + s.currentIdx = idx + + return nil +} + +func (s *SeenTracker) checkKeyValue(node *ast.Node) error { + parentIdx := s.currentIdx + it := node.Key() + + for it.Next() { + k := it.Node().Data + + idx := s.find(parentIdx, k) + + if idx < 0 { + idx = s.create(parentIdx, k, tableKind, false, true) + } else { + entry := s.entries[idx] + if it.IsLast() { + return fmt.Errorf("toml: key %s is already defined", string(k)) + } else if entry.kind != tableKind { + return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + } else if entry.explicit { + return fmt.Errorf("toml: cannot redefine table %s that has already been explicitly defined", string(k)) + } + } + + parentIdx = idx + } + + s.entries[parentIdx].kind = valueKind + + value := node.Value() + + switch value.Kind { + case ast.InlineTable: + return s.checkInlineTable(value) + case ast.Array: + return s.checkArray(value) + } + + return nil +} + +func (s *SeenTracker) checkArray(node *ast.Node) error { + it := node.Children() + for it.Next() { + n := it.Node() + switch n.Kind { + case ast.InlineTable: + err := s.checkInlineTable(n) + if err != nil { + return err + } + case ast.Array: + err := s.checkArray(n) + if err != nil { + return err + } + } + } + return nil +} + +func (s *SeenTracker) checkInlineTable(node *ast.Node) error { + if pool.New == nil { + pool.New = func() interface{} { + return &SeenTracker{} + } + } + + s = pool.Get().(*SeenTracker) + s.reset() + + it := node.Children() + for it.Next() { + n := it.Node() + err := s.checkKeyValue(n) + if err != nil { + return err + } + } + + // As inline tables are self-contained, the tracker does not + // need to retain the details of what they contain. 
The + // keyValue element that creates the inline table is kept to + // mark the presence of the inline table and prevent + // redefinition of its keys: check* functions cannot walk into + // a value. + pool.Put(s) + return nil +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go new file mode 100644 index 00000000..bf031739 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go @@ -0,0 +1 @@ +package tracker diff --git a/vendor/github.com/pelletier/go-toml/v2/localtime.go b/vendor/github.com/pelletier/go-toml/v2/localtime.go new file mode 100644 index 00000000..30a31dcb --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/localtime.go @@ -0,0 +1,120 @@ +package toml + +import ( + "fmt" + "strings" + "time" +) + +// LocalDate represents a calendar day in no specific timezone. +type LocalDate struct { + Year int + Month int + Day int +} + +// AsTime converts d into a specific time instance at midnight in zone. +func (d LocalDate) AsTime(zone *time.Location) time.Time { + return time.Date(d.Year, time.Month(d.Month), d.Day, 0, 0, 0, 0, zone) +} + +// String returns RFC 3339 representation of d. +func (d LocalDate) String() string { + return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) +} + +// MarshalText returns RFC 3339 representation of d. +func (d LocalDate) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText parses b using RFC 3339 to fill d. +func (d *LocalDate) UnmarshalText(b []byte) error { + res, err := parseLocalDate(b) + if err != nil { + return err + } + *d = res + return nil +} + +// LocalTime represents a time of day of no specific day in no specific +// timezone. +type LocalTime struct { + Hour int // Hour of the day: [0; 24[ + Minute int // Minute of the hour: [0; 60[ + Second int // Second of the minute: [0; 60[ + Nanosecond int // Nanoseconds within the second: [0, 1000000000[ + Precision int // Number of digits to display for Nanosecond. +} + +// String returns RFC 3339 representation of d. +// If d.Nanosecond and d.Precision are zero, the time won't have a nanosecond +// component. If d.Nanosecond > 0 but d.Precision = 0, then the minimum number +// of digits for nanoseconds is provided. +func (d LocalTime) String() string { + s := fmt.Sprintf("%02d:%02d:%02d", d.Hour, d.Minute, d.Second) + + if d.Precision > 0 { + s += fmt.Sprintf(".%09d", d.Nanosecond)[:d.Precision+1] + } else if d.Nanosecond > 0 { + // Nanoseconds are specified, but precision is not provided. Use the + // minimum. + s += strings.Trim(fmt.Sprintf(".%09d", d.Nanosecond), "0") + } + + return s +} + +// MarshalText returns RFC 3339 representation of d. +func (d LocalTime) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText parses b using RFC 3339 to fill d. +func (d *LocalTime) UnmarshalText(b []byte) error { + res, left, err := parseLocalTime(b) + if err == nil && len(left) != 0 { + err = newDecodeError(left, "extra characters") + } + if err != nil { + return err + } + *d = res + return nil +} + +// LocalDateTime represents a time of a specific day in no specific timezone. +type LocalDateTime struct { + LocalDate + LocalTime +} + +// AsTime converts d into a specific time instance in zone. 
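As an editor's aside (not part of the vendored file), the wall-clock types above compose as follows; the sketch only uses API declared in this localtime.go, plus LocalDateTime.AsTime defined just below.

package main

import (
	"fmt"
	"time"

	toml "github.com/pelletier/go-toml/v2"
)

func main() {
	dt := toml.LocalDateTime{
		LocalDate: toml.LocalDate{Year: 2022, Month: 5, Day: 1},
		LocalTime: toml.LocalTime{Hour: 9, Minute: 30},
	}

	fmt.Println(dt.String())                              // 2022-05-01T09:30:00
	fmt.Println(dt.AsTime(time.UTC).Format(time.RFC3339)) // 2022-05-01T09:30:00Z
}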
+func (d LocalDateTime) AsTime(zone *time.Location) time.Time { + return time.Date(d.Year, time.Month(d.Month), d.Day, d.Hour, d.Minute, d.Second, d.Nanosecond, zone) +} + +// String returns RFC 3339 representation of d. +func (d LocalDateTime) String() string { + return d.LocalDate.String() + "T" + d.LocalTime.String() +} + +// MarshalText returns RFC 3339 representation of d. +func (d LocalDateTime) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText parses b using RFC 3339 to fill d. +func (d *LocalDateTime) UnmarshalText(data []byte) error { + res, left, err := parseLocalDateTime(data) + if err == nil && len(left) != 0 { + err = newDecodeError(left, "extra characters") + } + if err != nil { + return err + } + + *d = res + return nil +} diff --git a/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/vendor/github.com/pelletier/go-toml/v2/marshaler.go new file mode 100644 index 00000000..91f3b3c2 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/marshaler.go @@ -0,0 +1,950 @@ +package toml + +import ( + "bytes" + "encoding" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + "unicode" +) + +// Marshal serializes a Go value as a TOML document. +// +// It is a shortcut for Encoder.Encode() with the default options. +func Marshal(v interface{}) ([]byte, error) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + + err := enc.Encode(v) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// Encoder writes a TOML document to an output stream. +type Encoder struct { + // output + w io.Writer + + // global settings + tablesInline bool + arraysMultiline bool + indentSymbol string + indentTables bool +} + +// NewEncoder returns a new Encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + indentSymbol: " ", + } +} + +// SetTablesInline forces the encoder to emit all tables inline. +// +// This behavior can be controlled on an individual struct field basis with the +// inline tag: +// +// MyField `inline:"true"` +func (enc *Encoder) SetTablesInline(inline bool) *Encoder { + enc.tablesInline = inline + return enc +} + +// SetArraysMultiline forces the encoder to emit all arrays with one element per +// line. +// +// This behavior can be controlled on an individual struct field basis with the multiline tag: +// +// MyField `multiline:"true"` +func (enc *Encoder) SetArraysMultiline(multiline bool) *Encoder { + enc.arraysMultiline = multiline + return enc +} + +// SetIndentSymbol defines the string that should be used for indentation. The +// provided string is repeated for each indentation level. Defaults to two +// spaces. +func (enc *Encoder) SetIndentSymbol(s string) *Encoder { + enc.indentSymbol = s + return enc +} + +// SetIndentTables forces the encoder to intent tables and array tables. +func (enc *Encoder) SetIndentTables(indent bool) *Encoder { + enc.indentTables = indent + return enc +} + +// Encode writes a TOML representation of v to the stream. +// +// If v cannot be represented to TOML it returns an error. +// +// Encoding rules +// +// A top level slice containing only maps or structs is encoded as [[table +// array]]. +// +// All slices not matching rule 1 are encoded as [array]. As a result, any map +// or struct they contain is encoded as an {inline table}. +// +// Nil interfaces and nil pointers are not supported. +// +// Keys in key-values always have one part. +// +// Intermediate tables are always printed. 
+// +// By default, strings are encoded as literal string, unless they contain either +// a newline character or a single quote. In that case they are emitted as +// quoted strings. +// +// When encoding structs, fields are encoded in order of definition, with their +// exact name. +// +// Struct tags +// +// The encoding of each public struct field can be customized by the format +// string in the "toml" key of the struct field's tag. This follows +// encoding/json's convention. The format string starts with the name of the +// field, optionally followed by a comma-separated list of options. The name may +// be empty in order to provide options without overriding the default name. +// +// The "multiline" option emits strings as quoted multi-line TOML strings. It +// has no effect on fields that would not be encoded as strings. +// +// The "inline" option turns fields that would be emitted as tables into inline +// tables instead. It has no effect on other fields. +// +// The "omitempty" option prevents empty values or groups from being emitted. +// +// In addition to the "toml" tag struct tag, a "comment" tag can be used to emit +// a TOML comment before the value being annotated. Comments are ignored inside +// inline tables. +func (enc *Encoder) Encode(v interface{}) error { + var ( + b []byte + ctx encoderCtx + ) + + ctx.inline = enc.tablesInline + + if v == nil { + return fmt.Errorf("toml: cannot encode a nil interface") + } + + b, err := enc.encode(b, ctx, reflect.ValueOf(v)) + if err != nil { + return err + } + + _, err = enc.w.Write(b) + if err != nil { + return fmt.Errorf("toml: cannot write: %w", err) + } + + return nil +} + +type valueOptions struct { + multiline bool + omitempty bool + comment string +} + +type encoderCtx struct { + // Current top-level key. + parentKey []string + + // Key that should be used for a KV. + key string + // Extra flag to account for the empty string + hasKey bool + + // Set to true to indicate that the encoder is inside a KV, so that all + // tables need to be inlined. + insideKv bool + + // Set to true to skip the first table header in an array table. 
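To make the tag rules above concrete, here is an editor's sketch (not part of the vendored file). The Config type and its fields are invented for illustration; only Marshal and the tags documented in this file are used, and the expected output is indicative rather than byte-exact.

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml/v2"
)

type Config struct {
	Host     string            `toml:"host" comment:"public hostname"`
	Replicas int               `toml:"replicas,omitempty"` // dropped while zero
	Labels   map[string]string `toml:"labels,inline"`      // forced to an inline table
	Password string            `toml:"-"`                  // never emitted
}

func main() {
	b, err := toml.Marshal(Config{
		Host:   "example.org",
		Labels: map[string]string{"env": "dev"},
	})
	fmt.Println(err)
	fmt.Println(string(b))
	// Roughly:
	//   # public hostname
	//   host = 'example.org'
	//   labels = {env = 'dev'}
}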
+ skipTableHeader bool + + // Should the next table be encoded as inline + inline bool + + // Indentation level + indent int + + // Options coming from struct tags + options valueOptions +} + +func (ctx *encoderCtx) shiftKey() { + if ctx.hasKey { + ctx.parentKey = append(ctx.parentKey, ctx.key) + ctx.clearKey() + } +} + +func (ctx *encoderCtx) setKey(k string) { + ctx.key = k + ctx.hasKey = true +} + +func (ctx *encoderCtx) clearKey() { + ctx.key = "" + ctx.hasKey = false +} + +func (ctx *encoderCtx) isRoot() bool { + return len(ctx.parentKey) == 0 && !ctx.hasKey +} + +func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + i := v.Interface() + + switch x := i.(type) { + case time.Time: + if x.Nanosecond() > 0 { + return x.AppendFormat(b, time.RFC3339Nano), nil + } + return x.AppendFormat(b, time.RFC3339), nil + case LocalTime: + return append(b, x.String()...), nil + case LocalDate: + return append(b, x.String()...), nil + case LocalDateTime: + return append(b, x.String()...), nil + } + + hasTextMarshaler := v.Type().Implements(textMarshalerType) + if hasTextMarshaler || (v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + if !hasTextMarshaler { + v = v.Addr() + } + + if ctx.isRoot() { + return nil, fmt.Errorf("toml: type %s implementing the TextMarshaler interface cannot be a root element", v.Type()) + } + + text, err := v.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return nil, err + } + + b = enc.encodeString(b, string(text), ctx.options) + + return b, nil + } + + switch v.Kind() { + // containers + case reflect.Map: + return enc.encodeMap(b, ctx, v) + case reflect.Struct: + return enc.encodeStruct(b, ctx, v) + case reflect.Slice: + return enc.encodeSlice(b, ctx, v) + case reflect.Interface: + if v.IsNil() { + return nil, fmt.Errorf("toml: encoding a nil interface is not supported") + } + + return enc.encode(b, ctx, v.Elem()) + case reflect.Ptr: + if v.IsNil() { + return enc.encode(b, ctx, reflect.Zero(v.Type().Elem())) + } + + return enc.encode(b, ctx, v.Elem()) + + // values + case reflect.String: + b = enc.encodeString(b, v.String(), ctx.options) + case reflect.Float32: + f := v.Float() + + if math.IsNaN(f) { + b = append(b, "nan"...) + } else if f > math.MaxFloat32 { + b = append(b, "inf"...) + } else if f < -math.MaxFloat32 { + b = append(b, "-inf"...) + } else if math.Trunc(f) == f { + b = strconv.AppendFloat(b, f, 'f', 1, 32) + } else { + b = strconv.AppendFloat(b, f, 'f', -1, 32) + } + case reflect.Float64: + f := v.Float() + if math.IsNaN(f) { + b = append(b, "nan"...) + } else if f > math.MaxFloat64 { + b = append(b, "inf"...) + } else if f < -math.MaxFloat64 { + b = append(b, "-inf"...) + } else if math.Trunc(f) == f { + b = strconv.AppendFloat(b, f, 'f', 1, 64) + } else { + b = strconv.AppendFloat(b, f, 'f', -1, 64) + } + case reflect.Bool: + if v.Bool() { + b = append(b, "true"...) + } else { + b = append(b, "false"...) 
+ } + case reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint: + b = strconv.AppendUint(b, v.Uint(), 10) + case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int: + b = strconv.AppendInt(b, v.Int(), 10) + default: + return nil, fmt.Errorf("toml: cannot encode value of type %s", v.Kind()) + } + + return b, nil +} + +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Map: + return v.IsNil() + default: + return false + } +} + +func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v reflect.Value) ([]byte, error) { + var err error + + if (ctx.options.omitempty || options.omitempty) && isEmptyValue(v) { + return b, nil + } + + if !ctx.inline { + b = enc.encodeComment(ctx.indent, options.comment, b) + } + + b = enc.indent(ctx.indent, b) + b = enc.encodeKey(b, ctx.key) + b = append(b, " = "...) + + // create a copy of the context because the value of a KV shouldn't + // modify the global context. + subctx := ctx + subctx.insideKv = true + subctx.shiftKey() + subctx.options = options + + b, err = enc.encode(b, subctx, v) + if err != nil { + return nil, err + } + + return b, nil +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +const literalQuote = '\'' + +func (enc *Encoder) encodeString(b []byte, v string, options valueOptions) []byte { + if needsQuoting(v) { + return enc.encodeQuotedString(options.multiline, b, v) + } + + return enc.encodeLiteralString(b, v) +} + +func needsQuoting(v string) bool { + // TODO: vectorize + for _, b := range []byte(v) { + if b == '\'' || b == '\r' || b == '\n' || invalidAscii(b) { + return true + } + } + return false +} + +// caller should have checked that the string does not contain new lines or ' . +func (enc *Encoder) encodeLiteralString(b []byte, v string) []byte { + b = append(b, literalQuote) + b = append(b, v...) + b = append(b, literalQuote) + + return b +} + +//nolint:cyclop +func (enc *Encoder) encodeQuotedString(multiline bool, b []byte, v string) []byte { + stringQuote := `"` + + if multiline { + stringQuote = `"""` + } + + b = append(b, stringQuote...) + if multiline { + b = append(b, '\n') + } + + const ( + hextable = "0123456789ABCDEF" + // U+0000 to U+0008, U+000A to U+001F, U+007F + nul = 0x0 + bs = 0x8 + lf = 0xa + us = 0x1f + del = 0x7f + ) + + for _, r := range []byte(v) { + switch r { + case '\\': + b = append(b, `\\`...) + case '"': + b = append(b, `\"`...) + case '\b': + b = append(b, `\b`...) + case '\f': + b = append(b, `\f`...) + case '\n': + if multiline { + b = append(b, r) + } else { + b = append(b, `\n`...) + } + case '\r': + b = append(b, `\r`...) + case '\t': + b = append(b, `\t`...) + default: + switch { + case r >= nul && r <= bs, r >= lf && r <= us, r == del: + b = append(b, `\u00`...) + b = append(b, hextable[r>>4]) + b = append(b, hextable[r&0x0f]) + default: + b = append(b, r) + } + } + } + + b = append(b, stringQuote...) 
+ + return b +} + +// caller should have checked that the string is in A-Z / a-z / 0-9 / - / _ . +func (enc *Encoder) encodeUnquotedKey(b []byte, v string) []byte { + return append(b, v...) +} + +func (enc *Encoder) encodeTableHeader(ctx encoderCtx, b []byte) ([]byte, error) { + if len(ctx.parentKey) == 0 { + return b, nil + } + + b = enc.encodeComment(ctx.indent, ctx.options.comment, b) + + b = enc.indent(ctx.indent, b) + + b = append(b, '[') + + b = enc.encodeKey(b, ctx.parentKey[0]) + + for _, k := range ctx.parentKey[1:] { + b = append(b, '.') + b = enc.encodeKey(b, k) + } + + b = append(b, "]\n"...) + + return b, nil +} + +//nolint:cyclop +func (enc *Encoder) encodeKey(b []byte, k string) []byte { + needsQuotation := false + cannotUseLiteral := false + + if len(k) == 0 { + return append(b, "''"...) + } + + for _, c := range k { + if (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '-' || c == '_' { + continue + } + + if c == literalQuote { + cannotUseLiteral = true + } + + needsQuotation = true + } + + if needsQuotation && needsQuoting(k) { + cannotUseLiteral = true + } + + switch { + case cannotUseLiteral: + return enc.encodeQuotedString(false, b, k) + case needsQuotation: + return enc.encodeLiteralString(b, k) + default: + return enc.encodeUnquotedKey(b, k) + } +} + +func (enc *Encoder) encodeMap(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + if v.Type().Key().Kind() != reflect.String { + return nil, fmt.Errorf("toml: type %s is not supported as a map key", v.Type().Key().Kind()) + } + + var ( + t table + emptyValueOptions valueOptions + ) + + iter := v.MapRange() + for iter.Next() { + k := iter.Key().String() + v := iter.Value() + + if isNil(v) { + continue + } + + if willConvertToTableOrArrayTable(ctx, v) { + t.pushTable(k, v, emptyValueOptions) + } else { + t.pushKV(k, v, emptyValueOptions) + } + } + + sortEntriesByKey(t.kvs) + sortEntriesByKey(t.tables) + + return enc.encodeTable(b, ctx, t) +} + +func sortEntriesByKey(e []entry) { + sort.Slice(e, func(i, j int) bool { + return e[i].Key < e[j].Key + }) +} + +type entry struct { + Key string + Value reflect.Value + Options valueOptions +} + +type table struct { + kvs []entry + tables []entry +} + +func (t *table) pushKV(k string, v reflect.Value, options valueOptions) { + for _, e := range t.kvs { + if e.Key == k { + return + } + } + + t.kvs = append(t.kvs, entry{Key: k, Value: v, Options: options}) +} + +func (t *table) pushTable(k string, v reflect.Value, options valueOptions) { + for _, e := range t.tables { + if e.Key == k { + return + } + } + t.tables = append(t.tables, entry{Key: k, Value: v, Options: options}) +} + +func walkStruct(ctx encoderCtx, t *table, v reflect.Value) { + // TODO: cache this + typ := v.Type() + for i := 0; i < typ.NumField(); i++ { + fieldType := typ.Field(i) + + // only consider exported fields + if fieldType.PkgPath != "" { + continue + } + + tag := fieldType.Tag.Get("toml") + + // special field name to skip field + if tag == "-" { + continue + } + + k, opts := parseTag(tag) + if !isValidName(k) { + k = "" + } + + f := v.Field(i) + + if k == "" { + if fieldType.Anonymous { + if fieldType.Type.Kind() == reflect.Struct { + walkStruct(ctx, t, f) + } + continue + } else { + k = fieldType.Name + } + } + + if isNil(f) { + continue + } + + options := valueOptions{ + multiline: opts.multiline, + omitempty: opts.omitempty, + comment: fieldType.Tag.Get("comment"), + } + + if opts.inline || !willConvertToTableOrArrayTable(ctx, f) { + t.pushKV(k, f, options) + } 
else { + t.pushTable(k, f, options) + } + } +} + +func (enc *Encoder) encodeStruct(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + var t table + + walkStruct(ctx, &t, v) + + return enc.encodeTable(b, ctx, t) +} + +func (enc *Encoder) encodeComment(indent int, comment string, b []byte) []byte { + if comment != "" { + b = enc.indent(indent, b) + b = append(b, "# "...) + b = append(b, comment...) + b = append(b, '\n') + } + return b +} + +func isValidName(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + case !unicode.IsLetter(c) && !unicode.IsDigit(c): + return false + } + } + return true +} + +type tagOptions struct { + multiline bool + inline bool + omitempty bool +} + +func parseTag(tag string) (string, tagOptions) { + opts := tagOptions{} + + idx := strings.Index(tag, ",") + if idx == -1 { + return tag, opts + } + + raw := tag[idx+1:] + tag = string(tag[:idx]) + for raw != "" { + var o string + i := strings.Index(raw, ",") + if i >= 0 { + o, raw = raw[:i], raw[i+1:] + } else { + o, raw = raw, "" + } + switch o { + case "multiline": + opts.multiline = true + case "inline": + opts.inline = true + case "omitempty": + opts.omitempty = true + } + } + + return tag, opts +} + +func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, error) { + var err error + + ctx.shiftKey() + + if ctx.insideKv || (ctx.inline && !ctx.isRoot()) { + return enc.encodeTableInline(b, ctx, t) + } + + if !ctx.skipTableHeader { + b, err = enc.encodeTableHeader(ctx, b) + if err != nil { + return nil, err + } + + if enc.indentTables && len(ctx.parentKey) > 0 { + ctx.indent++ + } + } + ctx.skipTableHeader = false + + for _, kv := range t.kvs { + ctx.setKey(kv.Key) + + b, err = enc.encodeKv(b, ctx, kv.Options, kv.Value) + if err != nil { + return nil, err + } + + b = append(b, '\n') + } + + for _, table := range t.tables { + ctx.setKey(table.Key) + + ctx.options = table.Options + + b, err = enc.encode(b, ctx, table.Value) + if err != nil { + return nil, err + } + + b = append(b, '\n') + } + + return b, nil +} + +func (enc *Encoder) encodeTableInline(b []byte, ctx encoderCtx, t table) ([]byte, error) { + var err error + + b = append(b, '{') + + first := true + for _, kv := range t.kvs { + if first { + first = false + } else { + b = append(b, `, `...) + } + + ctx.setKey(kv.Key) + + b, err = enc.encodeKv(b, ctx, kv.Options, kv.Value) + if err != nil { + return nil, err + } + } + + if len(t.tables) > 0 { + panic("inline table cannot contain nested tables, online key-values") + } + + b = append(b, "}"...) 
+ + return b, nil +} + +func willConvertToTable(ctx encoderCtx, v reflect.Value) bool { + if !v.IsValid() { + return false + } + if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + return false + } + + t := v.Type() + switch t.Kind() { + case reflect.Map, reflect.Struct: + return !ctx.inline + case reflect.Interface: + return willConvertToTable(ctx, v.Elem()) + case reflect.Ptr: + if v.IsNil() { + return false + } + + return willConvertToTable(ctx, v.Elem()) + default: + return false + } +} + +func willConvertToTableOrArrayTable(ctx encoderCtx, v reflect.Value) bool { + if ctx.insideKv { + return false + } + t := v.Type() + + if t.Kind() == reflect.Interface { + return willConvertToTableOrArrayTable(ctx, v.Elem()) + } + + if t.Kind() == reflect.Slice { + if v.Len() == 0 { + // An empty slice should be a kv = []. + return false + } + + for i := 0; i < v.Len(); i++ { + t := willConvertToTable(ctx, v.Index(i)) + + if !t { + return false + } + } + + return true + } + + return willConvertToTable(ctx, v) +} + +func (enc *Encoder) encodeSlice(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + if v.Len() == 0 { + b = append(b, "[]"...) + + return b, nil + } + + if willConvertToTableOrArrayTable(ctx, v) { + return enc.encodeSliceAsArrayTable(b, ctx, v) + } + + return enc.encodeSliceAsArray(b, ctx, v) +} + +// caller should have checked that v is a slice that only contains values that +// encode into tables. +func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + ctx.shiftKey() + + scratch := make([]byte, 0, 64) + scratch = append(scratch, "[["...) + + for i, k := range ctx.parentKey { + if i > 0 { + scratch = append(scratch, '.') + } + + scratch = enc.encodeKey(scratch, k) + } + + scratch = append(scratch, "]]\n"...) + ctx.skipTableHeader = true + + for i := 0; i < v.Len(); i++ { + b = append(b, scratch...) + + var err error + b, err = enc.encode(b, ctx, v.Index(i)) + if err != nil { + return nil, err + } + } + + return b, nil +} + +func (enc *Encoder) encodeSliceAsArray(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + multiline := ctx.options.multiline || enc.arraysMultiline + separator := ", " + + b = append(b, '[') + + subCtx := ctx + subCtx.options = valueOptions{} + + if multiline { + separator = ",\n" + + b = append(b, '\n') + + subCtx.indent++ + } + + var err error + first := true + + for i := 0; i < v.Len(); i++ { + if first { + first = false + } else { + b = append(b, separator...) + } + + if multiline { + b = enc.indent(subCtx.indent, b) + } + + b, err = enc.encode(b, subCtx, v.Index(i)) + if err != nil { + return nil, err + } + } + + if multiline { + b = append(b, '\n') + b = enc.indent(ctx.indent, b) + } + + b = append(b, ']') + + return b, nil +} + +func (enc *Encoder) indent(level int, b []byte) []byte { + for i := 0; i < level; i++ { + b = append(b, enc.indentSymbol...) 
+ } + + return b +} diff --git a/vendor/github.com/pelletier/go-toml/v2/parser.go b/vendor/github.com/pelletier/go-toml/v2/parser.go new file mode 100644 index 00000000..9859a795 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/parser.go @@ -0,0 +1,1086 @@ +package toml + +import ( + "bytes" + "unicode" + + "github.com/pelletier/go-toml/v2/internal/ast" + "github.com/pelletier/go-toml/v2/internal/danger" +) + +type parser struct { + builder ast.Builder + ref ast.Reference + data []byte + left []byte + err error + first bool +} + +func (p *parser) Range(b []byte) ast.Range { + return ast.Range{ + Offset: uint32(danger.SubsliceOffset(p.data, b)), + Length: uint32(len(b)), + } +} + +func (p *parser) Raw(raw ast.Range) []byte { + return p.data[raw.Offset : raw.Offset+raw.Length] +} + +func (p *parser) Reset(b []byte) { + p.builder.Reset() + p.ref = ast.InvalidReference + p.data = b + p.left = b + p.err = nil + p.first = true +} + +//nolint:cyclop +func (p *parser) NextExpression() bool { + if len(p.left) == 0 || p.err != nil { + return false + } + + p.builder.Reset() + p.ref = ast.InvalidReference + + for { + if len(p.left) == 0 || p.err != nil { + return false + } + + if !p.first { + p.left, p.err = p.parseNewline(p.left) + } + + if len(p.left) == 0 || p.err != nil { + return false + } + + p.ref, p.left, p.err = p.parseExpression(p.left) + + if p.err != nil { + return false + } + + p.first = false + + if p.ref.Valid() { + return true + } + } +} + +func (p *parser) Expression() *ast.Node { + return p.builder.NodeAt(p.ref) +} + +func (p *parser) Error() error { + return p.err +} + +func (p *parser) parseNewline(b []byte) ([]byte, error) { + if b[0] == '\n' { + return b[1:], nil + } + + if b[0] == '\r' { + _, rest, err := scanWindowsNewline(b) + return rest, err + } + + return nil, newDecodeError(b[0:1], "expected newline but got %#U", b[0]) +} + +func (p *parser) parseExpression(b []byte) (ast.Reference, []byte, error) { + // expression = ws [ comment ] + // expression =/ ws keyval ws [ comment ] + // expression =/ ws table ws [ comment ] + ref := ast.InvalidReference + + b = p.parseWhitespace(b) + + if len(b) == 0 { + return ref, b, nil + } + + if b[0] == '#' { + _, rest, err := scanComment(b) + return ref, rest, err + } + + if b[0] == '\n' || b[0] == '\r' { + return ref, b, nil + } + + var err error + if b[0] == '[' { + ref, b, err = p.parseTable(b) + } else { + ref, b, err = p.parseKeyval(b) + } + + if err != nil { + return ref, nil, err + } + + b = p.parseWhitespace(b) + + if len(b) > 0 && b[0] == '#' { + _, rest, err := scanComment(b) + return ref, rest, err + } + + return ref, b, nil +} + +func (p *parser) parseTable(b []byte) (ast.Reference, []byte, error) { + // table = std-table / array-table + if len(b) > 1 && b[1] == '[' { + return p.parseArrayTable(b) + } + + return p.parseStdTable(b) +} + +func (p *parser) parseArrayTable(b []byte) (ast.Reference, []byte, error) { + // array-table = array-table-open key array-table-close + // array-table-open = %x5B.5B ws ; [[ Double left square bracket + // array-table-close = ws %x5D.5D ; ]] Double right square bracket + ref := p.builder.Push(ast.Node{ + Kind: ast.ArrayTable, + }) + + b = b[2:] + b = p.parseWhitespace(b) + + k, b, err := p.parseKey(b) + if err != nil { + return ref, nil, err + } + + p.builder.AttachChild(ref, k) + b = p.parseWhitespace(b) + + b, err = expect(']', b) + if err != nil { + return ref, nil, err + } + + b, err = expect(']', b) + + return ref, b, err +} + +func (p *parser) parseStdTable(b []byte) 
(ast.Reference, []byte, error) { + // std-table = std-table-open key std-table-close + // std-table-open = %x5B ws ; [ Left square bracket + // std-table-close = ws %x5D ; ] Right square bracket + ref := p.builder.Push(ast.Node{ + Kind: ast.Table, + }) + + b = b[1:] + b = p.parseWhitespace(b) + + key, b, err := p.parseKey(b) + if err != nil { + return ref, nil, err + } + + p.builder.AttachChild(ref, key) + + b = p.parseWhitespace(b) + + b, err = expect(']', b) + + return ref, b, err +} + +func (p *parser) parseKeyval(b []byte) (ast.Reference, []byte, error) { + // keyval = key keyval-sep val + ref := p.builder.Push(ast.Node{ + Kind: ast.KeyValue, + }) + + key, b, err := p.parseKey(b) + if err != nil { + return ast.InvalidReference, nil, err + } + + // keyval-sep = ws %x3D ws ; = + + b = p.parseWhitespace(b) + + if len(b) == 0 { + return ast.InvalidReference, nil, newDecodeError(b, "expected = after a key, but the document ends there") + } + + b, err = expect('=', b) + if err != nil { + return ast.InvalidReference, nil, err + } + + b = p.parseWhitespace(b) + + valRef, b, err := p.parseVal(b) + if err != nil { + return ref, b, err + } + + p.builder.Chain(valRef, key) + p.builder.AttachChild(ref, valRef) + + return ref, b, err +} + +//nolint:cyclop,funlen +func (p *parser) parseVal(b []byte) (ast.Reference, []byte, error) { + // val = string / boolean / array / inline-table / date-time / float / integer + ref := ast.InvalidReference + + if len(b) == 0 { + return ref, nil, newDecodeError(b, "expected value, not eof") + } + + var err error + c := b[0] + + switch c { + case '"': + var raw []byte + var v []byte + if scanFollowsMultilineBasicStringDelimiter(b) { + raw, v, b, err = p.parseMultilineBasicString(b) + } else { + raw, v, b, err = p.parseBasicString(b) + } + + if err == nil { + ref = p.builder.Push(ast.Node{ + Kind: ast.String, + Raw: p.Range(raw), + Data: v, + }) + } + + return ref, b, err + case '\'': + var raw []byte + var v []byte + if scanFollowsMultilineLiteralStringDelimiter(b) { + raw, v, b, err = p.parseMultilineLiteralString(b) + } else { + raw, v, b, err = p.parseLiteralString(b) + } + + if err == nil { + ref = p.builder.Push(ast.Node{ + Kind: ast.String, + Raw: p.Range(raw), + Data: v, + }) + } + + return ref, b, err + case 't': + if !scanFollowsTrue(b) { + return ref, nil, newDecodeError(atmost(b, 4), "expected 'true'") + } + + ref = p.builder.Push(ast.Node{ + Kind: ast.Bool, + Data: b[:4], + }) + + return ref, b[4:], nil + case 'f': + if !scanFollowsFalse(b) { + return ref, nil, newDecodeError(atmost(b, 5), "expected 'false'") + } + + ref = p.builder.Push(ast.Node{ + Kind: ast.Bool, + Data: b[:5], + }) + + return ref, b[5:], nil + case '[': + return p.parseValArray(b) + case '{': + return p.parseInlineTable(b) + default: + return p.parseIntOrFloatOrDateTime(b) + } +} + +func atmost(b []byte, n int) []byte { + if n >= len(b) { + return b + } + + return b[:n] +} + +func (p *parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) { + v, rest, err := scanLiteralString(b) + if err != nil { + return nil, nil, nil, err + } + + return v, v[1 : len(v)-1], rest, nil +} + +func (p *parser) parseInlineTable(b []byte) (ast.Reference, []byte, error) { + // inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close + // inline-table-open = %x7B ws ; { + // inline-table-close = ws %x7D ; } + // inline-table-sep = ws %x2C ws ; , Comma + // inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] + parent := p.builder.Push(ast.Node{ + Kind: 
ast.InlineTable, + }) + + first := true + + var child ast.Reference + + b = b[1:] + + var err error + + for len(b) > 0 { + previousB := b + b = p.parseWhitespace(b) + + if len(b) == 0 { + return parent, nil, newDecodeError(previousB[:1], "inline table is incomplete") + } + + if b[0] == '}' { + break + } + + if !first { + b, err = expect(',', b) + if err != nil { + return parent, nil, err + } + b = p.parseWhitespace(b) + } + + var kv ast.Reference + + kv, b, err = p.parseKeyval(b) + if err != nil { + return parent, nil, err + } + + if first { + p.builder.AttachChild(parent, kv) + } else { + p.builder.Chain(child, kv) + } + child = kv + + first = false + } + + rest, err := expect('}', b) + + return parent, rest, err +} + +//nolint:funlen,cyclop +func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) { + // array = array-open [ array-values ] ws-comment-newline array-close + // array-open = %x5B ; [ + // array-close = %x5D ; ] + // array-values = ws-comment-newline val ws-comment-newline array-sep array-values + // array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ] + // array-sep = %x2C ; , Comma + // ws-comment-newline = *( wschar / [ comment ] newline ) + arrayStart := b + b = b[1:] + + parent := p.builder.Push(ast.Node{ + Kind: ast.Array, + }) + + first := true + + var lastChild ast.Reference + + var err error + for len(b) > 0 { + b, err = p.parseOptionalWhitespaceCommentNewline(b) + if err != nil { + return parent, nil, err + } + + if len(b) == 0 { + return parent, nil, newDecodeError(arrayStart[:1], "array is incomplete") + } + + if b[0] == ']' { + break + } + + if b[0] == ',' { + if first { + return parent, nil, newDecodeError(b[0:1], "array cannot start with comma") + } + b = b[1:] + + b, err = p.parseOptionalWhitespaceCommentNewline(b) + if err != nil { + return parent, nil, err + } + } else if !first { + return parent, nil, newDecodeError(b[0:1], "array elements must be separated by commas") + } + + // TOML allows trailing commas in arrays. 
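The comma handling in parseValArray is easiest to verify end to end. Editor's sketch only (not part of the vendored file), assuming the module's public Unmarshal function from another file in this diff:

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml/v2"
)

func main() {
	var v struct {
		A []int `toml:"a"`
	}

	fmt.Println(toml.Unmarshal([]byte("a = [1, 2, 3,]"), &v), v.A) // <nil> [1 2 3], trailing comma accepted
	fmt.Println(toml.Unmarshal([]byte("a = [,1, 2]"), &v) != nil)  // true, array cannot start with comma
	fmt.Println(toml.Unmarshal([]byte("a = [1 2]"), &v) != nil)    // true, elements must be separated by commas
}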
+ if len(b) > 0 && b[0] == ']' { + break + } + + var valueRef ast.Reference + valueRef, b, err = p.parseVal(b) + if err != nil { + return parent, nil, err + } + + if first { + p.builder.AttachChild(parent, valueRef) + } else { + p.builder.Chain(lastChild, valueRef) + } + lastChild = valueRef + + b, err = p.parseOptionalWhitespaceCommentNewline(b) + if err != nil { + return parent, nil, err + } + first = false + } + + rest, err := expect(']', b) + + return parent, rest, err +} + +func (p *parser) parseOptionalWhitespaceCommentNewline(b []byte) ([]byte, error) { + for len(b) > 0 { + var err error + b = p.parseWhitespace(b) + + if len(b) > 0 && b[0] == '#' { + _, b, err = scanComment(b) + if err != nil { + return nil, err + } + } + + if len(b) == 0 { + break + } + + if b[0] == '\n' || b[0] == '\r' { + b, err = p.parseNewline(b) + if err != nil { + return nil, err + } + } else { + break + } + } + + return b, nil +} + +func (p *parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, error) { + token, rest, err := scanMultilineLiteralString(b) + if err != nil { + return nil, nil, nil, err + } + + i := 3 + + // skip the immediate new line + if token[i] == '\n' { + i++ + } else if token[i] == '\r' && token[i+1] == '\n' { + i += 2 + } + + return token, token[i : len(token)-3], rest, err +} + +//nolint:funlen,gocognit,cyclop +func (p *parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, error) { + // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body + // ml-basic-string-delim + // ml-basic-string-delim = 3quotation-mark + // ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] + // + // mlb-content = mlb-char / newline / mlb-escaped-nl + // mlb-char = mlb-unescaped / escaped + // mlb-quotes = 1*2quotation-mark + // mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // mlb-escaped-nl = escape ws newline *( wschar / newline ) + token, escaped, rest, err := scanMultilineBasicString(b) + if err != nil { + return nil, nil, nil, err + } + + i := 3 + + // skip the immediate new line + if token[i] == '\n' { + i++ + } else if token[i] == '\r' && token[i+1] == '\n' { + i += 2 + } + + // fast path + startIdx := i + endIdx := len(token) - len(`"""`) + + if !escaped { + str := token[startIdx:endIdx] + verr := utf8TomlValidAlreadyEscaped(str) + if verr.Zero() { + return token, str, rest, nil + } + return nil, nil, nil, newDecodeError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") + } + + var builder bytes.Buffer + + // The scanner ensures that the token starts and ends with quotes and that + // escapes are balanced. + for i < len(token)-3 { + c := token[i] + + //nolint:nestif + if c == '\\' { + // When the last non-whitespace character on a line is an unescaped \, + // it will be trimmed along with all whitespace (including newlines) up + // to the next non-whitespace character or closing delimiter. 
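This trimming is what makes a line-ending backslash act as a join. Editor's sketch only (not part of the vendored file), again assuming the module's public Unmarshal:

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml/v2"
)

func main() {
	// The document below is:
	//   s = """one \
	//         two"""
	doc := "s = \"\"\"one \\\n      two\"\"\""

	var v struct {
		S string `toml:"s"`
	}
	if err := toml.Unmarshal([]byte(doc), &v); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", v.S) // "one two": the newline and indentation are trimmed
}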
+ + isLastNonWhitespaceOnLine := false + j := 1 + findEOLLoop: + for ; j < len(token)-3-i; j++ { + switch token[i+j] { + case ' ', '\t': + continue + case '\r': + if token[i+j+1] == '\n' { + continue + } + case '\n': + isLastNonWhitespaceOnLine = true + } + break findEOLLoop + } + if isLastNonWhitespaceOnLine { + i += j + for ; i < len(token)-3; i++ { + c := token[i] + if !(c == '\n' || c == '\r' || c == ' ' || c == '\t') { + i-- + break + } + } + i++ + continue + } + + // handle escaping + i++ + c = token[i] + + switch c { + case '"', '\\': + builder.WriteByte(c) + case 'b': + builder.WriteByte('\b') + case 'f': + builder.WriteByte('\f') + case 'n': + builder.WriteByte('\n') + case 'r': + builder.WriteByte('\r') + case 't': + builder.WriteByte('\t') + case 'e': + builder.WriteByte(0x1B) + case 'u': + x, err := hexToRune(atmost(token[i+1:], 4), 4) + if err != nil { + return nil, nil, nil, err + } + builder.WriteRune(x) + i += 4 + case 'U': + x, err := hexToRune(atmost(token[i+1:], 8), 8) + if err != nil { + return nil, nil, nil, err + } + + builder.WriteRune(x) + i += 8 + default: + return nil, nil, nil, newDecodeError(token[i:i+1], "invalid escaped character %#U", c) + } + i++ + } else { + size := utf8ValidNext(token[i:]) + if size == 0 { + return nil, nil, nil, newDecodeError(token[i:i+1], "invalid character %#U", c) + } + builder.Write(token[i : i+size]) + i += size + } + } + + return token, builder.Bytes(), rest, nil +} + +func (p *parser) parseKey(b []byte) (ast.Reference, []byte, error) { + // key = simple-key / dotted-key + // simple-key = quoted-key / unquoted-key + // + // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ + // quoted-key = basic-string / literal-string + // dotted-key = simple-key 1*( dot-sep simple-key ) + // + // dot-sep = ws %x2E ws ; . Period + raw, key, b, err := p.parseSimpleKey(b) + if err != nil { + return ast.InvalidReference, nil, err + } + + ref := p.builder.Push(ast.Node{ + Kind: ast.Key, + Raw: p.Range(raw), + Data: key, + }) + + for { + b = p.parseWhitespace(b) + if len(b) > 0 && b[0] == '.' 
{ + b = p.parseWhitespace(b[1:]) + + raw, key, b, err = p.parseSimpleKey(b) + if err != nil { + return ref, nil, err + } + + p.builder.PushAndChain(ast.Node{ + Kind: ast.Key, + Raw: p.Range(raw), + Data: key, + }) + } else { + break + } + } + + return ref, b, nil +} + +func (p *parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) { + if len(b) == 0 { + return nil, nil, nil, newDecodeError(b, "expected key but found none") + } + + // simple-key = quoted-key / unquoted-key + // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ + // quoted-key = basic-string / literal-string + switch { + case b[0] == '\'': + return p.parseLiteralString(b) + case b[0] == '"': + return p.parseBasicString(b) + case isUnquotedKeyChar(b[0]): + key, rest = scanUnquotedKey(b) + return key, key, rest, nil + default: + return nil, nil, nil, newDecodeError(b[0:1], "invalid character at start of key: %c", b[0]) + } +} + +//nolint:funlen,cyclop +func (p *parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) { + // basic-string = quotation-mark *basic-char quotation-mark + // quotation-mark = %x22 ; " + // basic-char = basic-unescaped / escaped + // basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // escaped = escape escape-seq-char + // escape-seq-char = %x22 ; " quotation mark U+0022 + // escape-seq-char =/ %x5C ; \ reverse solidus U+005C + // escape-seq-char =/ %x62 ; b backspace U+0008 + // escape-seq-char =/ %x66 ; f form feed U+000C + // escape-seq-char =/ %x6E ; n line feed U+000A + // escape-seq-char =/ %x72 ; r carriage return U+000D + // escape-seq-char =/ %x74 ; t tab U+0009 + // escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX + // escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX + token, escaped, rest, err := scanBasicString(b) + if err != nil { + return nil, nil, nil, err + } + + startIdx := len(`"`) + endIdx := len(token) - len(`"`) + + // Fast path. If there is no escape sequence, the string should just be + // an UTF-8 encoded string, which is the same as Go. In that case, + // validate the string and return a direct reference to the buffer. + if !escaped { + str := token[startIdx:endIdx] + verr := utf8TomlValidAlreadyEscaped(str) + if verr.Zero() { + return token, str, rest, nil + } + return nil, nil, nil, newDecodeError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") + } + + i := startIdx + + var builder bytes.Buffer + + // The scanner ensures that the token starts and ends with quotes and that + // escapes are balanced. 
+ for i < len(token)-1 { + c := token[i] + if c == '\\' { + i++ + c = token[i] + + switch c { + case '"', '\\': + builder.WriteByte(c) + case 'b': + builder.WriteByte('\b') + case 'f': + builder.WriteByte('\f') + case 'n': + builder.WriteByte('\n') + case 'r': + builder.WriteByte('\r') + case 't': + builder.WriteByte('\t') + case 'e': + builder.WriteByte(0x1B) + case 'u': + x, err := hexToRune(token[i+1:len(token)-1], 4) + if err != nil { + return nil, nil, nil, err + } + + builder.WriteRune(x) + i += 4 + case 'U': + x, err := hexToRune(token[i+1:len(token)-1], 8) + if err != nil { + return nil, nil, nil, err + } + + builder.WriteRune(x) + i += 8 + default: + return nil, nil, nil, newDecodeError(token[i:i+1], "invalid escaped character %#U", c) + } + i++ + } else { + size := utf8ValidNext(token[i:]) + if size == 0 { + return nil, nil, nil, newDecodeError(token[i:i+1], "invalid character %#U", c) + } + builder.Write(token[i : i+size]) + i += size + } + } + + return token, builder.Bytes(), rest, nil +} + +func hexToRune(b []byte, length int) (rune, error) { + if len(b) < length { + return -1, newDecodeError(b, "unicode point needs %d character, not %d", length, len(b)) + } + b = b[:length] + + var r uint32 + for i, c := range b { + d := uint32(0) + switch { + case '0' <= c && c <= '9': + d = uint32(c - '0') + case 'a' <= c && c <= 'f': + d = uint32(c - 'a' + 10) + case 'A' <= c && c <= 'F': + d = uint32(c - 'A' + 10) + default: + return -1, newDecodeError(b[i:i+1], "non-hex character") + } + r = r*16 + d + } + + if r > unicode.MaxRune || 0xD800 <= r && r < 0xE000 { + return -1, newDecodeError(b, "escape sequence is invalid Unicode code point") + } + + return rune(r), nil +} + +func (p *parser) parseWhitespace(b []byte) []byte { + // ws = *wschar + // wschar = %x20 ; Space + // wschar =/ %x09 ; Horizontal tab + _, rest := scanWhitespace(b) + + return rest +} + +//nolint:cyclop +func (p *parser) parseIntOrFloatOrDateTime(b []byte) (ast.Reference, []byte, error) { + switch b[0] { + case 'i': + if !scanFollowsInf(b) { + return ast.InvalidReference, nil, newDecodeError(atmost(b, 3), "expected 'inf'") + } + + return p.builder.Push(ast.Node{ + Kind: ast.Float, + Data: b[:3], + }), b[3:], nil + case 'n': + if !scanFollowsNan(b) { + return ast.InvalidReference, nil, newDecodeError(atmost(b, 3), "expected 'nan'") + } + + return p.builder.Push(ast.Node{ + Kind: ast.Float, + Data: b[:3], + }), b[3:], nil + case '+', '-': + return p.scanIntOrFloat(b) + } + + if len(b) < 3 { + return p.scanIntOrFloat(b) + } + + s := 5 + if len(b) < s { + s = len(b) + } + + for idx, c := range b[:s] { + if isDigit(c) { + continue + } + + if idx == 2 && c == ':' || (idx == 4 && c == '-') { + return p.scanDateTime(b) + } + + break + } + + return p.scanIntOrFloat(b) +} + +func (p *parser) scanDateTime(b []byte) (ast.Reference, []byte, error) { + // scans for contiguous characters in [0-9T:Z.+-], and up to one space if + // followed by a digit. + hasDate := false + hasTime := false + hasTz := false + seenSpace := false + + i := 0 +byteLoop: + for ; i < len(b); i++ { + c := b[i] + + switch { + case isDigit(c): + case c == '-': + hasDate = true + const minOffsetOfTz = 8 + if i >= minOffsetOfTz { + hasTz = true + } + case c == 'T' || c == 't' || c == ':' || c == '.': + hasTime = true + case c == '+' || c == '-' || c == 'Z' || c == 'z': + hasTz = true + case c == ' ': + if !seenSpace && i+1 < len(b) && isDigit(b[i+1]) { + i += 2 + // Avoid reaching past the end of the document in case the time + // is malformed. See TestIssue585. 
+ if i >= len(b) { + i-- + } + seenSpace = true + hasTime = true + } else { + break byteLoop + } + default: + break byteLoop + } + } + + var kind ast.Kind + + if hasTime { + if hasDate { + if hasTz { + kind = ast.DateTime + } else { + kind = ast.LocalDateTime + } + } else { + kind = ast.LocalTime + } + } else { + kind = ast.LocalDate + } + + return p.builder.Push(ast.Node{ + Kind: kind, + Data: b[:i], + }), b[i:], nil +} + +//nolint:funlen,gocognit,cyclop +func (p *parser) scanIntOrFloat(b []byte) (ast.Reference, []byte, error) { + i := 0 + + if len(b) > 2 && b[0] == '0' && b[1] != '.' && b[1] != 'e' && b[1] != 'E' { + var isValidRune validRuneFn + + switch b[1] { + case 'x': + isValidRune = isValidHexRune + case 'o': + isValidRune = isValidOctalRune + case 'b': + isValidRune = isValidBinaryRune + default: + i++ + } + + if isValidRune != nil { + i += 2 + for ; i < len(b); i++ { + if !isValidRune(b[i]) { + break + } + } + } + + return p.builder.Push(ast.Node{ + Kind: ast.Integer, + Data: b[:i], + }), b[i:], nil + } + + isFloat := false + + for ; i < len(b); i++ { + c := b[i] + + if c >= '0' && c <= '9' || c == '+' || c == '-' || c == '_' { + continue + } + + if c == '.' || c == 'e' || c == 'E' { + isFloat = true + + continue + } + + if c == 'i' { + if scanFollowsInf(b[i:]) { + return p.builder.Push(ast.Node{ + Kind: ast.Float, + Data: b[:i+3], + }), b[i+3:], nil + } + + return ast.InvalidReference, nil, newDecodeError(b[i:i+1], "unexpected character 'i' while scanning for a number") + } + + if c == 'n' { + if scanFollowsNan(b[i:]) { + return p.builder.Push(ast.Node{ + Kind: ast.Float, + Data: b[:i+3], + }), b[i+3:], nil + } + + return ast.InvalidReference, nil, newDecodeError(b[i:i+1], "unexpected character 'n' while scanning for a number") + } + + break + } + + if i == 0 { + return ast.InvalidReference, b, newDecodeError(b, "incomplete number") + } + + kind := ast.Integer + + if isFloat { + kind = ast.Float + } + + return p.builder.Push(ast.Node{ + Kind: kind, + Data: b[:i], + }), b[i:], nil +} + +func isDigit(r byte) bool { + return r >= '0' && r <= '9' +} + +type validRuneFn func(r byte) bool + +func isValidHexRune(r byte) bool { + return r >= 'a' && r <= 'f' || + r >= 'A' && r <= 'F' || + r >= '0' && r <= '9' || + r == '_' +} + +func isValidOctalRune(r byte) bool { + return r >= '0' && r <= '7' || r == '_' +} + +func isValidBinaryRune(r byte) bool { + return r == '0' || r == '1' || r == '_' +} + +func expect(x byte, b []byte) ([]byte, error) { + if len(b) == 0 { + return nil, newDecodeError(b, "expected character %c but the document ended here", x) + } + + if b[0] != x { + return nil, newDecodeError(b[0:1], "expected character %c", x) + } + + return b[1:], nil +} diff --git a/vendor/github.com/pelletier/go-toml/v2/scanner.go b/vendor/github.com/pelletier/go-toml/v2/scanner.go new file mode 100644 index 00000000..bb445fab --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/scanner.go @@ -0,0 +1,269 @@ +package toml + +func scanFollows(b []byte, pattern string) bool { + n := len(pattern) + + return len(b) >= n && string(b[:n]) == pattern +} + +func scanFollowsMultilineBasicStringDelimiter(b []byte) bool { + return scanFollows(b, `"""`) +} + +func scanFollowsMultilineLiteralStringDelimiter(b []byte) bool { + return scanFollows(b, `'''`) +} + +func scanFollowsTrue(b []byte) bool { + return scanFollows(b, `true`) +} + +func scanFollowsFalse(b []byte) bool { + return scanFollows(b, `false`) +} + +func scanFollowsInf(b []byte) bool { + return scanFollows(b, `inf`) +} + +func 
scanFollowsNan(b []byte) bool { + return scanFollows(b, `nan`) +} + +func scanUnquotedKey(b []byte) ([]byte, []byte) { + // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ + for i := 0; i < len(b); i++ { + if !isUnquotedKeyChar(b[i]) { + return b[:i], b[i:] + } + } + + return b, b[len(b):] +} + +func isUnquotedKeyChar(r byte) bool { + return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '-' || r == '_' +} + +func scanLiteralString(b []byte) ([]byte, []byte, error) { + // literal-string = apostrophe *literal-char apostrophe + // apostrophe = %x27 ; ' apostrophe + // literal-char = %x09 / %x20-26 / %x28-7E / non-ascii + for i := 1; i < len(b); { + switch b[i] { + case '\'': + return b[:i+1], b[i+1:], nil + case '\n', '\r': + return nil, nil, newDecodeError(b[i:i+1], "literal strings cannot have new lines") + } + size := utf8ValidNext(b[i:]) + if size == 0 { + return nil, nil, newDecodeError(b[i:i+1], "invalid character") + } + i += size + } + + return nil, nil, newDecodeError(b[len(b):], "unterminated literal string") +} + +func scanMultilineLiteralString(b []byte) ([]byte, []byte, error) { + // ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body + // ml-literal-string-delim + // ml-literal-string-delim = 3apostrophe + // ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ] + // + // mll-content = mll-char / newline + // mll-char = %x09 / %x20-26 / %x28-7E / non-ascii + // mll-quotes = 1*2apostrophe + for i := 3; i < len(b); { + switch b[i] { + case '\'': + if scanFollowsMultilineLiteralStringDelimiter(b[i:]) { + i += 3 + + // At that point we found 3 apostrophe, and i is the + // index of the byte after the third one. The scanner + // needs to be eager, because there can be an extra 2 + // apostrophe that can be accepted at the end of the + // string. 
+ + if i >= len(b) || b[i] != '\'' { + return b[:i], b[i:], nil + } + i++ + + if i >= len(b) || b[i] != '\'' { + return b[:i], b[i:], nil + } + i++ + + if i < len(b) && b[i] == '\'' { + return nil, nil, newDecodeError(b[i-3:i+1], "''' not allowed in multiline literal string") + } + + return b[:i], b[i:], nil + } + case '\r': + if len(b) < i+2 { + return nil, nil, newDecodeError(b[len(b):], `need a \n after \r`) + } + if b[i+1] != '\n' { + return nil, nil, newDecodeError(b[i:i+2], `need a \n after \r`) + } + i += 2 // skip the \n + continue + } + size := utf8ValidNext(b[i:]) + if size == 0 { + return nil, nil, newDecodeError(b[i:i+1], "invalid character") + } + i += size + } + + return nil, nil, newDecodeError(b[len(b):], `multiline literal string not terminated by '''`) +} + +func scanWindowsNewline(b []byte) ([]byte, []byte, error) { + const lenCRLF = 2 + if len(b) < lenCRLF { + return nil, nil, newDecodeError(b, "windows new line expected") + } + + if b[1] != '\n' { + return nil, nil, newDecodeError(b, `windows new line should be \r\n`) + } + + return b[:lenCRLF], b[lenCRLF:], nil +} + +func scanWhitespace(b []byte) ([]byte, []byte) { + for i := 0; i < len(b); i++ { + switch b[i] { + case ' ', '\t': + continue + default: + return b[:i], b[i:] + } + } + + return b, b[len(b):] +} + +//nolint:unparam +func scanComment(b []byte) ([]byte, []byte, error) { + // comment-start-symbol = %x23 ; # + // non-ascii = %x80-D7FF / %xE000-10FFFF + // non-eol = %x09 / %x20-7F / non-ascii + // + // comment = comment-start-symbol *non-eol + + for i := 1; i < len(b); { + if b[i] == '\n' { + return b[:i], b[i:], nil + } + if b[i] == '\r' { + if i+1 < len(b) && b[i+1] == '\n' { + return b[:i+1], b[i+1:], nil + } + return nil, nil, newDecodeError(b[i:i+1], "invalid character in comment") + } + size := utf8ValidNext(b[i:]) + if size == 0 { + return nil, nil, newDecodeError(b[i:i+1], "invalid character in comment") + } + + i += size + } + + return b, b[len(b):], nil +} + +func scanBasicString(b []byte) ([]byte, bool, []byte, error) { + // basic-string = quotation-mark *basic-char quotation-mark + // quotation-mark = %x22 ; " + // basic-char = basic-unescaped / escaped + // basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // escaped = escape escape-seq-char + escaped := false + i := 1 + + for ; i < len(b); i++ { + switch b[i] { + case '"': + return b[:i+1], escaped, b[i+1:], nil + case '\n', '\r': + return nil, escaped, nil, newDecodeError(b[i:i+1], "basic strings cannot have new lines") + case '\\': + if len(b) < i+2 { + return nil, escaped, nil, newDecodeError(b[i:i+1], "need a character after \\") + } + escaped = true + i++ // skip the next character + } + } + + return nil, escaped, nil, newDecodeError(b[len(b):], `basic string not terminated by "`) +} + +func scanMultilineBasicString(b []byte) ([]byte, bool, []byte, error) { + // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body + // ml-basic-string-delim + // ml-basic-string-delim = 3quotation-mark + // ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] + // + // mlb-content = mlb-char / newline / mlb-escaped-nl + // mlb-char = mlb-unescaped / escaped + // mlb-quotes = 1*2quotation-mark + // mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // mlb-escaped-nl = escape ws newline *( wschar / newline ) + + escaped := false + i := 3 + + for ; i < len(b); i++ { + switch b[i] { + case '"': + if scanFollowsMultilineBasicStringDelimiter(b[i:]) { + i += 3 + + // At that point we found 3 apostrophe, 
and i is the + // index of the byte after the third one. The scanner + // needs to be eager, because there can be an extra 2 + // apostrophe that can be accepted at the end of the + // string. + + if i >= len(b) || b[i] != '"' { + return b[:i], escaped, b[i:], nil + } + i++ + + if i >= len(b) || b[i] != '"' { + return b[:i], escaped, b[i:], nil + } + i++ + + if i < len(b) && b[i] == '"' { + return nil, escaped, nil, newDecodeError(b[i-3:i+1], `""" not allowed in multiline basic string`) + } + + return b[:i], escaped, b[i:], nil + } + case '\\': + if len(b) < i+2 { + return nil, escaped, nil, newDecodeError(b[len(b):], "need a character after \\") + } + escaped = true + i++ // skip the next character + case '\r': + if len(b) < i+2 { + return nil, escaped, nil, newDecodeError(b[len(b):], `need a \n after \r`) + } + if b[i+1] != '\n' { + return nil, escaped, nil, newDecodeError(b[i:i+2], `need a \n after \r`) + } + i++ // skip the \n + } + } + + return nil, escaped, nil, newDecodeError(b[len(b):], `multiline basic string not terminated by """`) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/strict.go b/vendor/github.com/pelletier/go-toml/v2/strict.go new file mode 100644 index 00000000..b7830d13 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/strict.go @@ -0,0 +1,107 @@ +package toml + +import ( + "github.com/pelletier/go-toml/v2/internal/ast" + "github.com/pelletier/go-toml/v2/internal/danger" + "github.com/pelletier/go-toml/v2/internal/tracker" +) + +type strict struct { + Enabled bool + + // Tracks the current key being processed. + key tracker.KeyTracker + + missing []decodeError +} + +func (s *strict) EnterTable(node *ast.Node) { + if !s.Enabled { + return + } + + s.key.UpdateTable(node) +} + +func (s *strict) EnterArrayTable(node *ast.Node) { + if !s.Enabled { + return + } + + s.key.UpdateArrayTable(node) +} + +func (s *strict) EnterKeyValue(node *ast.Node) { + if !s.Enabled { + return + } + + s.key.Push(node) +} + +func (s *strict) ExitKeyValue(node *ast.Node) { + if !s.Enabled { + return + } + + s.key.Pop(node) +} + +func (s *strict) MissingTable(node *ast.Node) { + if !s.Enabled { + return + } + + s.missing = append(s.missing, decodeError{ + highlight: keyLocation(node), + message: "missing table", + key: s.key.Key(), + }) +} + +func (s *strict) MissingField(node *ast.Node) { + if !s.Enabled { + return + } + + s.missing = append(s.missing, decodeError{ + highlight: keyLocation(node), + message: "missing field", + key: s.key.Key(), + }) +} + +func (s *strict) Error(doc []byte) error { + if !s.Enabled || len(s.missing) == 0 { + return nil + } + + err := &StrictMissingError{ + Errors: make([]DecodeError, 0, len(s.missing)), + } + + for _, derr := range s.missing { + derr := derr + err.Errors = append(err.Errors, *wrapDecodeError(doc, &derr)) + } + + return err +} + +func keyLocation(node *ast.Node) []byte { + k := node.Key() + + hasOne := k.Next() + if !hasOne { + panic("should not be called with empty key") + } + + start := k.Node().Data + end := k.Node().Data + + for k.Next() { + end = k.Node().Data + } + + return danger.BytesRange(start, end) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/toml.abnf b/vendor/github.com/pelletier/go-toml/v2/toml.abnf new file mode 100644 index 00000000..473f3749 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/toml.abnf @@ -0,0 +1,243 @@ +;; This document describes TOML's syntax, using the ABNF format (defined in +;; RFC 5234 -- https://www.ietf.org/rfc/rfc5234.txt). 
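Before the grammar itself, a brief caller-level sketch of what scanMultilineBasicString above (and its literal-string counterpart) ends up accepting: up to two unescaped quote characters may sit immediately before the closing triple delimiter. This is a minimal example that assumes only the package-level toml.Unmarshal added later in this diff; the field names are illustrative.

package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

func main() {
	doc := []byte(`
b = """two extra quotes at the end: """""
l = '''two ''apostrophes'' are fine here'''
`)

	// Per the scanner above, the string is closed by the final triple quote
	// and the two quotes before it belong to the value; a third extra quote
	// would be a decode error.
	var v struct{ B, L string }
	if err := toml.Unmarshal(doc, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.B) // two extra quotes at the end: ""
	fmt.Println(v.L) // two ''apostrophes'' are fine here
}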
+;; +;; All valid TOML documents will match this description, however certain +;; invalid documents would need to be rejected as per the semantics described +;; in the supporting text description. + +;; It is possible to try this grammar interactively, using instaparse. +;; http://instaparse.mojombo.com/ +;; +;; To do so, in the lower right, click on Options and change `:input-format` to +;; ':abnf'. Then paste this entire ABNF document into the grammar entry box +;; (above the options). Then you can type or paste a sample TOML document into +;; the beige box on the left. Tada! + +;; Overall Structure + +toml = expression *( newline expression ) + +expression = ws [ comment ] +expression =/ ws keyval ws [ comment ] +expression =/ ws table ws [ comment ] + +;; Whitespace + +ws = *wschar +wschar = %x20 ; Space +wschar =/ %x09 ; Horizontal tab + +;; Newline + +newline = %x0A ; LF +newline =/ %x0D.0A ; CRLF + +;; Comment + +comment-start-symbol = %x23 ; # +non-ascii = %x80-D7FF / %xE000-10FFFF +non-eol = %x09 / %x20-7F / non-ascii + +comment = comment-start-symbol *non-eol + +;; Key-Value pairs + +keyval = key keyval-sep val + +key = simple-key / dotted-key +simple-key = quoted-key / unquoted-key + +unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ +quoted-key = basic-string / literal-string +dotted-key = simple-key 1*( dot-sep simple-key ) + +dot-sep = ws %x2E ws ; . Period +keyval-sep = ws %x3D ws ; = + +val = string / boolean / array / inline-table / date-time / float / integer + +;; String + +string = ml-basic-string / basic-string / ml-literal-string / literal-string + +;; Basic String + +basic-string = quotation-mark *basic-char quotation-mark + +quotation-mark = %x22 ; " + +basic-char = basic-unescaped / escaped +basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii +escaped = escape escape-seq-char + +escape = %x5C ; \ +escape-seq-char = %x22 ; " quotation mark U+0022 +escape-seq-char =/ %x5C ; \ reverse solidus U+005C +escape-seq-char =/ %x62 ; b backspace U+0008 +escape-seq-char =/ %x66 ; f form feed U+000C +escape-seq-char =/ %x6E ; n line feed U+000A +escape-seq-char =/ %x72 ; r carriage return U+000D +escape-seq-char =/ %x74 ; t tab U+0009 +escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX +escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX + +;; Multiline Basic String + +ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body + ml-basic-string-delim +ml-basic-string-delim = 3quotation-mark +ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] + +mlb-content = mlb-char / newline / mlb-escaped-nl +mlb-char = mlb-unescaped / escaped +mlb-quotes = 1*2quotation-mark +mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii +mlb-escaped-nl = escape ws newline *( wschar / newline ) + +;; Literal String + +literal-string = apostrophe *literal-char apostrophe + +apostrophe = %x27 ; ' apostrophe + +literal-char = %x09 / %x20-26 / %x28-7E / non-ascii + +;; Multiline Literal String + +ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body + ml-literal-string-delim +ml-literal-string-delim = 3apostrophe +ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ] + +mll-content = mll-char / newline +mll-char = %x09 / %x20-26 / %x28-7E / non-ascii +mll-quotes = 1*2apostrophe + +;; Integer + +integer = dec-int / hex-int / oct-int / bin-int + +minus = %x2D ; - +plus = %x2B ; + +underscore = %x5F ; _ +digit1-9 = %x31-39 ; 1-9 +digit0-7 = %x30-37 ; 0-7 +digit0-1 = %x30-31 ; 0-1 + +hex-prefix = 
%x30.78 ; 0x +oct-prefix = %x30.6F ; 0o +bin-prefix = %x30.62 ; 0b + +dec-int = [ minus / plus ] unsigned-dec-int +unsigned-dec-int = DIGIT / digit1-9 1*( DIGIT / underscore DIGIT ) + +hex-int = hex-prefix HEXDIG *( HEXDIG / underscore HEXDIG ) +oct-int = oct-prefix digit0-7 *( digit0-7 / underscore digit0-7 ) +bin-int = bin-prefix digit0-1 *( digit0-1 / underscore digit0-1 ) + +;; Float + +float = float-int-part ( exp / frac [ exp ] ) +float =/ special-float + +float-int-part = dec-int +frac = decimal-point zero-prefixable-int +decimal-point = %x2E ; . +zero-prefixable-int = DIGIT *( DIGIT / underscore DIGIT ) + +exp = "e" float-exp-part +float-exp-part = [ minus / plus ] zero-prefixable-int + +special-float = [ minus / plus ] ( inf / nan ) +inf = %x69.6e.66 ; inf +nan = %x6e.61.6e ; nan + +;; Boolean + +boolean = true / false + +true = %x74.72.75.65 ; true +false = %x66.61.6C.73.65 ; false + +;; Date and Time (as defined in RFC 3339) + +date-time = offset-date-time / local-date-time / local-date / local-time + +date-fullyear = 4DIGIT +date-month = 2DIGIT ; 01-12 +date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year +time-delim = "T" / %x20 ; T, t, or space +time-hour = 2DIGIT ; 00-23 +time-minute = 2DIGIT ; 00-59 +time-second = 2DIGIT ; 00-58, 00-59, 00-60 based on leap second rules +time-secfrac = "." 1*DIGIT +time-numoffset = ( "+" / "-" ) time-hour ":" time-minute +time-offset = "Z" / time-numoffset + +partial-time = time-hour ":" time-minute ":" time-second [ time-secfrac ] +full-date = date-fullyear "-" date-month "-" date-mday +full-time = partial-time time-offset + +;; Offset Date-Time + +offset-date-time = full-date time-delim full-time + +;; Local Date-Time + +local-date-time = full-date time-delim partial-time + +;; Local Date + +local-date = full-date + +;; Local Time + +local-time = partial-time + +;; Array + +array = array-open [ array-values ] ws-comment-newline array-close + +array-open = %x5B ; [ +array-close = %x5D ; ] + +array-values = ws-comment-newline val ws-comment-newline array-sep array-values +array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ] + +array-sep = %x2C ; , Comma + +ws-comment-newline = *( wschar / [ comment ] newline ) + +;; Table + +table = std-table / array-table + +;; Standard Table + +std-table = std-table-open key std-table-close + +std-table-open = %x5B ws ; [ Left square bracket +std-table-close = ws %x5D ; ] Right square bracket + +;; Inline Table + +inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close + +inline-table-open = %x7B ws ; { +inline-table-close = ws %x7D ; } +inline-table-sep = ws %x2C ws ; , Comma + +inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] + +;; Array Table + +array-table = array-table-open key array-table-close + +array-table-open = %x5B.5B ws ; [[ Double left square bracket +array-table-close = ws %x5D.5D ; ]] Double right square bracket + +;; Built-in ABNF terms, reproduced here for clarity + +ALPHA = %x41-5A / %x61-7A ; A-Z / a-z +DIGIT = %x30-39 ; 0-9 +HEXDIG = DIGIT / "A" / "B" / "C" / "D" / "E" / "F" diff --git a/vendor/github.com/pelletier/go-toml/v2/types.go b/vendor/github.com/pelletier/go-toml/v2/types.go new file mode 100644 index 00000000..630a4546 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/types.go @@ -0,0 +1,14 @@ +package toml + +import ( + "encoding" + "reflect" + "time" +) + +var timeType = reflect.TypeOf(time.Time{}) +var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() +var 
textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() +var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) +var sliceInterfaceType = reflect.TypeOf([]interface{}{}) +var stringType = reflect.TypeOf("") diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go new file mode 100644 index 00000000..2219f704 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -0,0 +1,1189 @@ +package toml + +import ( + "encoding" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "reflect" + "strings" + "sync/atomic" + "time" + + "github.com/pelletier/go-toml/v2/internal/ast" + "github.com/pelletier/go-toml/v2/internal/danger" + "github.com/pelletier/go-toml/v2/internal/tracker" +) + +// Unmarshal deserializes a TOML document into a Go value. +// +// It is a shortcut for Decoder.Decode() with the default options. +func Unmarshal(data []byte, v interface{}) error { + p := parser{} + p.Reset(data) + d := decoder{p: &p} + + return d.FromParser(v) +} + +// Decoder reads and decode a TOML document from an input stream. +type Decoder struct { + // input + r io.Reader + + // global settings + strict bool +} + +// NewDecoder creates a new Decoder that will read from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +// DisallowUnknownFields causes the Decoder to return an error when the +// destination is a struct and the input contains a key that does not match a +// non-ignored field. +// +// In that case, the Decoder returns a StrictMissingError that can be used to +// retrieve the individual errors as well as generate a human readable +// description of the missing fields. +func (d *Decoder) DisallowUnknownFields() *Decoder { + d.strict = true + return d +} + +// Decode the whole content of r into v. +// +// By default, values in the document that don't exist in the target Go value +// are ignored. See Decoder.DisallowUnknownFields() to change this behavior. +// +// When a TOML local date, time, or date-time is decoded into a time.Time, its +// value is represented in time.Local timezone. Otherwise the approriate Local* +// structure is used. For time values, precision up to the nanosecond is +// supported by truncating extra digits. +// +// Empty tables decoded in an interface{} create an empty initialized +// map[string]interface{}. +// +// Types implementing the encoding.TextUnmarshaler interface are decoded from a +// TOML string. +// +// When decoding a number, go-toml will return an error if the number is out of +// bounds for the target type (which includes negative numbers when decoding +// into an unsigned int). +// +// If an error occurs while decoding the content of the document, this function +// returns a toml.DecodeError, providing context about the issue. When using +// strict mode and a field is missing, a `toml.StrictMissingError` is +// returned. In any other case, this function returns a standard Go error. 
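As a minimal sketch of the strict path described above, tying DisallowUnknownFields to the StrictMissingError assembled by strict.go earlier in this diff (the struct and key names are illustrative only):

package main

import (
	"errors"
	"fmt"
	"strings"

	"github.com/pelletier/go-toml/v2"
)

type config struct {
	Name string `toml:"name"`
}

func main() {
	doc := `name = "demo"
unknown = 42
`

	var c config
	err := toml.NewDecoder(strings.NewReader(doc)).DisallowUnknownFields().Decode(&c)

	var sme *toml.StrictMissingError
	if errors.As(err, &sme) {
		// One entry per document key that has no destination field.
		fmt.Printf("%d unknown key(s): %v\n", len(sme.Errors), err)
	}
	fmt.Println("name =", c.Name) // known fields are still decoded
}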
+// +// Type mapping +// +// List of supported TOML types and their associated accepted Go types: +// +// String -> string +// Integer -> uint*, int*, depending on size +// Float -> float*, depending on size +// Boolean -> bool +// Offset Date-Time -> time.Time +// Local Date-time -> LocalDateTime, time.Time +// Local Date -> LocalDate, time.Time +// Local Time -> LocalTime, time.Time +// Array -> slice and array, depending on elements types +// Table -> map and struct +// Inline Table -> same as Table +// Array of Tables -> same as Array and Table +func (d *Decoder) Decode(v interface{}) error { + b, err := ioutil.ReadAll(d.r) + if err != nil { + return fmt.Errorf("toml: %w", err) + } + + p := parser{} + p.Reset(b) + dec := decoder{ + p: &p, + strict: strict{ + Enabled: d.strict, + }, + } + + return dec.FromParser(v) +} + +type decoder struct { + // Which parser instance in use for this decoding session. + p *parser + + // Flag indicating that the current expression is stashed. + // If set to true, calling nextExpr will not actually pull a new expression + // but turn off the flag instead. + stashedExpr bool + + // Skip expressions until a table is found. This is set to true when a + // table could not be create (missing field in map), so all KV expressions + // need to be skipped. + skipUntilTable bool + + // Tracks position in Go arrays. + // This is used when decoding [[array tables]] into Go arrays. Given array + // tables are separate TOML expression, we need to keep track of where we + // are at in the Go array, as we can't just introspect its size. + arrayIndexes map[reflect.Value]int + + // Tracks keys that have been seen, with which type. + seen tracker.SeenTracker + + // Strict mode + strict strict + + // Current context for the error. + errorContext *errorContext +} + +type errorContext struct { + Struct reflect.Type + Field []int +} + +func (d *decoder) typeMismatchError(toml string, target reflect.Type) error { + if d.errorContext != nil && d.errorContext.Struct != nil { + ctx := d.errorContext + f := ctx.Struct.FieldByIndex(ctx.Field) + return fmt.Errorf("toml: cannot decode TOML %s into struct field %s.%s of type %s", toml, ctx.Struct, f.Name, f.Type) + } + return fmt.Errorf("toml: cannot decode TOML %s into a Go value of type %s", toml, target) +} + +func (d *decoder) expr() *ast.Node { + return d.p.Expression() +} + +func (d *decoder) nextExpr() bool { + if d.stashedExpr { + d.stashedExpr = false + return true + } + return d.p.NextExpression() +} + +func (d *decoder) stashExpr() { + d.stashedExpr = true +} + +func (d *decoder) arrayIndex(shouldAppend bool, v reflect.Value) int { + if d.arrayIndexes == nil { + d.arrayIndexes = make(map[reflect.Value]int, 1) + } + + idx, ok := d.arrayIndexes[v] + + if !ok { + d.arrayIndexes[v] = 0 + } else if shouldAppend { + idx++ + d.arrayIndexes[v] = idx + } + + return idx +} + +func (d *decoder) FromParser(v interface{}) error { + r := reflect.ValueOf(v) + if r.Kind() != reflect.Ptr { + return fmt.Errorf("toml: decoding can only be performed into a pointer, not %s", r.Kind()) + } + + if r.IsNil() { + return fmt.Errorf("toml: decoding pointer target cannot be nil") + } + + r = r.Elem() + if r.Kind() == reflect.Interface && r.IsNil() { + newMap := map[string]interface{}{} + r.Set(reflect.ValueOf(newMap)) + } + + err := d.fromParser(r) + if err == nil { + return d.strict.Error(d.p.data) + } + + var e *decodeError + if errors.As(err, &e) { + return wrapDecodeError(d.p.data, e) + } + + return err +} + +func (d *decoder) fromParser(root 
reflect.Value) error { + for d.nextExpr() { + err := d.handleRootExpression(d.expr(), root) + if err != nil { + return err + } + } + + return d.p.Error() +} + +/* +Rules for the unmarshal code: + +- The stack is used to keep track of which values need to be set where. +- handle* functions <=> switch on a given ast.Kind. +- unmarshalX* functions need to unmarshal a node of kind X. +- An "object" is either a struct or a map. +*/ + +func (d *decoder) handleRootExpression(expr *ast.Node, v reflect.Value) error { + var x reflect.Value + var err error + + if !(d.skipUntilTable && expr.Kind == ast.KeyValue) { + err = d.seen.CheckExpression(expr) + if err != nil { + return err + } + } + + switch expr.Kind { + case ast.KeyValue: + if d.skipUntilTable { + return nil + } + x, err = d.handleKeyValue(expr, v) + case ast.Table: + d.skipUntilTable = false + d.strict.EnterTable(expr) + x, err = d.handleTable(expr.Key(), v) + case ast.ArrayTable: + d.skipUntilTable = false + d.strict.EnterArrayTable(expr) + x, err = d.handleArrayTable(expr.Key(), v) + default: + panic(fmt.Errorf("parser should not permit expression of kind %s at document root", expr.Kind)) + } + + if d.skipUntilTable { + if expr.Kind == ast.Table || expr.Kind == ast.ArrayTable { + d.strict.MissingTable(expr) + } + } else if err == nil && x.IsValid() { + v.Set(x) + } + + return err +} + +func (d *decoder) handleArrayTable(key ast.Iterator, v reflect.Value) (reflect.Value, error) { + if key.Next() { + return d.handleArrayTablePart(key, v) + } + return d.handleKeyValues(v) +} + +func (d *decoder) handleArrayTableCollectionLast(key ast.Iterator, v reflect.Value) (reflect.Value, error) { + switch v.Kind() { + case reflect.Interface: + elem := v.Elem() + if !elem.IsValid() { + elem = reflect.New(sliceInterfaceType).Elem() + elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) + } else if elem.Kind() == reflect.Slice { + if elem.Type() != sliceInterfaceType { + elem = reflect.New(sliceInterfaceType).Elem() + elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) + } else if !elem.CanSet() { + nelem := reflect.New(sliceInterfaceType).Elem() + nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap())) + reflect.Copy(nelem, elem) + elem = nelem + } + } + return d.handleArrayTableCollectionLast(key, elem) + case reflect.Ptr: + elem := v.Elem() + if !elem.IsValid() { + ptr := reflect.New(v.Type().Elem()) + v.Set(ptr) + elem = ptr.Elem() + } + + elem, err := d.handleArrayTableCollectionLast(key, elem) + if err != nil { + return reflect.Value{}, err + } + v.Elem().Set(elem) + + return v, nil + case reflect.Slice: + elemType := v.Type().Elem() + var elem reflect.Value + if elemType.Kind() == reflect.Interface { + elem = makeMapStringInterface() + } else { + elem = reflect.New(elemType).Elem() + } + elem2, err := d.handleArrayTable(key, elem) + if err != nil { + return reflect.Value{}, err + } + if elem2.IsValid() { + elem = elem2 + } + return reflect.Append(v, elem), nil + case reflect.Array: + idx := d.arrayIndex(true, v) + if idx >= v.Len() { + return v, fmt.Errorf("toml: cannot decode array table into %s at position %d", v.Type(), idx) + } + elem := v.Index(idx) + _, err := d.handleArrayTable(key, elem) + return v, err + } + + return d.handleArrayTable(key, v) +} + +// When parsing an array table expression, each part of the key needs to be +// evaluated like a normal key, but if it returns a collection, it also needs to +// point to the last element of the collection. 
Unless it is the last part of +// the key, then it needs to create a new element at the end. +func (d *decoder) handleArrayTableCollection(key ast.Iterator, v reflect.Value) (reflect.Value, error) { + if key.IsLast() { + return d.handleArrayTableCollectionLast(key, v) + } + + switch v.Kind() { + case reflect.Ptr: + elem := v.Elem() + if !elem.IsValid() { + ptr := reflect.New(v.Type().Elem()) + v.Set(ptr) + elem = ptr.Elem() + } + + elem, err := d.handleArrayTableCollection(key, elem) + if err != nil { + return reflect.Value{}, err + } + if elem.IsValid() { + v.Elem().Set(elem) + } + + return v, nil + case reflect.Slice: + elem := v.Index(v.Len() - 1) + x, err := d.handleArrayTable(key, elem) + if err != nil || d.skipUntilTable { + return reflect.Value{}, err + } + if x.IsValid() { + elem.Set(x) + } + + return v, err + case reflect.Array: + idx := d.arrayIndex(false, v) + if idx >= v.Len() { + return v, fmt.Errorf("toml: cannot decode array table into %s at position %d", v.Type(), idx) + } + elem := v.Index(idx) + _, err := d.handleArrayTable(key, elem) + return v, err + } + + return d.handleArrayTable(key, v) +} + +func (d *decoder) handleKeyPart(key ast.Iterator, v reflect.Value, nextFn handlerFn, makeFn valueMakerFn) (reflect.Value, error) { + var rv reflect.Value + + // First, dispatch over v to make sure it is a valid object. + // There is no guarantee over what it could be. + switch v.Kind() { + case reflect.Ptr: + elem := v.Elem() + if !elem.IsValid() { + v.Set(reflect.New(v.Type().Elem())) + } + elem = v.Elem() + return d.handleKeyPart(key, elem, nextFn, makeFn) + case reflect.Map: + vt := v.Type() + + // Create the key for the map element. Convert to key type. + mk := reflect.ValueOf(string(key.Node().Data)).Convert(vt.Key()) + + // If the map does not exist, create it. + if v.IsNil() { + vt := v.Type() + v = reflect.MakeMap(vt) + rv = v + } + + mv := v.MapIndex(mk) + set := false + if !mv.IsValid() { + // If there is no value in the map, create a new one according to + // the map type. If the element type is interface, create either a + // map[string]interface{} or a []interface{} depending on whether + // this is the last part of the array table key. 
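+		// For example, with the document
+		//
+		//	[[fruit.variety]]
+		//	name = "plum"
+		//
+		// decoded into a map[string]interface{}, the intermediate part
+		// "fruit" receives a map[string]interface{} while the last part
+		// "variety" receives the []interface{} that each array-table
+		// element is appended to, giving roughly
+		// {"fruit": {"variety": [{"name": "plum"}]}}.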
+ + t := vt.Elem() + if t.Kind() == reflect.Interface { + mv = makeFn() + } else { + mv = reflect.New(t).Elem() + } + set = true + } else if mv.Kind() == reflect.Interface { + mv = mv.Elem() + if !mv.IsValid() { + mv = makeFn() + } + set = true + } else if !mv.CanAddr() { + vt := v.Type() + t := vt.Elem() + oldmv := mv + mv = reflect.New(t).Elem() + mv.Set(oldmv) + set = true + } + + x, err := nextFn(key, mv) + if err != nil { + return reflect.Value{}, err + } + + if x.IsValid() { + mv = x + set = true + } + + if set { + v.SetMapIndex(mk, mv) + } + case reflect.Struct: + path, found := structFieldPath(v, string(key.Node().Data)) + if !found { + d.skipUntilTable = true + return reflect.Value{}, nil + } + + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + t := v.Type() + d.errorContext.Struct = t + d.errorContext.Field = path + + f := v.FieldByIndex(path) + x, err := nextFn(key, f) + if err != nil || d.skipUntilTable { + return reflect.Value{}, err + } + if x.IsValid() { + f.Set(x) + } + d.errorContext.Field = nil + d.errorContext.Struct = nil + case reflect.Interface: + if v.Elem().IsValid() { + v = v.Elem() + } else { + v = makeMapStringInterface() + } + + x, err := d.handleKeyPart(key, v, nextFn, makeFn) + if err != nil { + return reflect.Value{}, err + } + if x.IsValid() { + v = x + } + rv = v + default: + panic(fmt.Errorf("unhandled part: %s", v.Kind())) + } + + return rv, nil +} + +// HandleArrayTablePart navigates the Go structure v using the key v. It is +// only used for the prefix (non-last) parts of an array-table. When +// encountering a collection, it should go to the last element. +func (d *decoder) handleArrayTablePart(key ast.Iterator, v reflect.Value) (reflect.Value, error) { + var makeFn valueMakerFn + if key.IsLast() { + makeFn = makeSliceInterface + } else { + makeFn = makeMapStringInterface + } + return d.handleKeyPart(key, v, d.handleArrayTableCollection, makeFn) +} + +// HandleTable returns a reference when it has checked the next expression but +// cannot handle it. +func (d *decoder) handleTable(key ast.Iterator, v reflect.Value) (reflect.Value, error) { + if v.Kind() == reflect.Slice { + if v.Len() == 0 { + return reflect.Value{}, newDecodeError(key.Node().Data, "cannot store a table in a slice") + } + elem := v.Index(v.Len() - 1) + x, err := d.handleTable(key, elem) + if err != nil { + return reflect.Value{}, err + } + if x.IsValid() { + elem.Set(x) + } + return reflect.Value{}, nil + } + if key.Next() { + // Still scoping the key + return d.handleTablePart(key, v) + } + // Done scoping the key. + // Now handle all the key-value expressions in this table. + return d.handleKeyValues(v) +} + +// Handle root expressions until the end of the document or the next +// non-key-value. +func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) { + var rv reflect.Value + for d.nextExpr() { + expr := d.expr() + if expr.Kind != ast.KeyValue { + // Stash the expression so that fromParser can just loop and use + // the right handler. + // We could just recurse ourselves here, but at least this gives a + // chance to pop the stack a bit. 
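+		// (The stashed expression is re-delivered by the next call to
+		// nextExpr, normally from fromParser's loop, which then dispatches
+		// it through handleRootExpression as a table or array table.)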
+ d.stashExpr() + break + } + + err := d.seen.CheckExpression(expr) + if err != nil { + return reflect.Value{}, err + } + + x, err := d.handleKeyValue(expr, v) + if err != nil { + return reflect.Value{}, err + } + if x.IsValid() { + v = x + rv = x + } + } + return rv, nil +} + +type ( + handlerFn func(key ast.Iterator, v reflect.Value) (reflect.Value, error) + valueMakerFn func() reflect.Value +) + +func makeMapStringInterface() reflect.Value { + return reflect.MakeMap(mapStringInterfaceType) +} + +func makeSliceInterface() reflect.Value { + return reflect.MakeSlice(sliceInterfaceType, 0, 16) +} + +func (d *decoder) handleTablePart(key ast.Iterator, v reflect.Value) (reflect.Value, error) { + return d.handleKeyPart(key, v, d.handleTable, makeMapStringInterface) +} + +func (d *decoder) tryTextUnmarshaler(node *ast.Node, v reflect.Value) (bool, error) { + // Special case for time, because we allow to unmarshal to it from + // different kind of AST nodes. + if v.Type() == timeType { + return false, nil + } + + if v.CanAddr() && v.Addr().Type().Implements(textUnmarshalerType) { + err := v.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText(node.Data) + if err != nil { + return false, newDecodeError(d.p.Raw(node.Raw), "%w", err) + } + + return true, nil + } + + return false, nil +} + +func (d *decoder) handleValue(value *ast.Node, v reflect.Value) error { + for v.Kind() == reflect.Ptr { + v = initAndDereferencePointer(v) + } + + ok, err := d.tryTextUnmarshaler(value, v) + if ok || err != nil { + return err + } + + switch value.Kind { + case ast.String: + return d.unmarshalString(value, v) + case ast.Integer: + return d.unmarshalInteger(value, v) + case ast.Float: + return d.unmarshalFloat(value, v) + case ast.Bool: + return d.unmarshalBool(value, v) + case ast.DateTime: + return d.unmarshalDateTime(value, v) + case ast.LocalDate: + return d.unmarshalLocalDate(value, v) + case ast.LocalTime: + return d.unmarshalLocalTime(value, v) + case ast.LocalDateTime: + return d.unmarshalLocalDateTime(value, v) + case ast.InlineTable: + return d.unmarshalInlineTable(value, v) + case ast.Array: + return d.unmarshalArray(value, v) + default: + panic(fmt.Errorf("handleValue not implemented for %s", value.Kind)) + } +} + +func (d *decoder) unmarshalArray(array *ast.Node, v reflect.Value) error { + switch v.Kind() { + case reflect.Slice: + if v.IsNil() { + v.Set(reflect.MakeSlice(v.Type(), 0, 16)) + } else { + v.SetLen(0) + } + case reflect.Array: + // arrays are always initialized + case reflect.Interface: + elem := v.Elem() + if !elem.IsValid() { + elem = reflect.New(sliceInterfaceType).Elem() + elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) + } else if elem.Kind() == reflect.Slice { + if elem.Type() != sliceInterfaceType { + elem = reflect.New(sliceInterfaceType).Elem() + elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) + } else if !elem.CanSet() { + nelem := reflect.New(sliceInterfaceType).Elem() + nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap())) + reflect.Copy(nelem, elem) + elem = nelem + } + } + err := d.unmarshalArray(array, elem) + if err != nil { + return err + } + v.Set(elem) + return nil + default: + // TODO: use newDecodeError, but first the parser needs to fill + // array.Data. 
+ return d.typeMismatchError("array", v.Type()) + } + + elemType := v.Type().Elem() + + it := array.Children() + idx := 0 + for it.Next() { + n := it.Node() + + // TODO: optimize + if v.Kind() == reflect.Slice { + elem := reflect.New(elemType).Elem() + + err := d.handleValue(n, elem) + if err != nil { + return err + } + + v.Set(reflect.Append(v, elem)) + } else { // array + if idx >= v.Len() { + return nil + } + elem := v.Index(idx) + err := d.handleValue(n, elem) + if err != nil { + return err + } + idx++ + } + } + + return nil +} + +func (d *decoder) unmarshalInlineTable(itable *ast.Node, v reflect.Value) error { + // Make sure v is an initialized object. + switch v.Kind() { + case reflect.Map: + if v.IsNil() { + v.Set(reflect.MakeMap(v.Type())) + } + case reflect.Struct: + // structs are always initialized. + case reflect.Interface: + elem := v.Elem() + if !elem.IsValid() { + elem = makeMapStringInterface() + v.Set(elem) + } + return d.unmarshalInlineTable(itable, elem) + default: + return newDecodeError(itable.Data, "cannot store inline table in Go type %s", v.Kind()) + } + + it := itable.Children() + for it.Next() { + n := it.Node() + + x, err := d.handleKeyValue(n, v) + if err != nil { + return err + } + if x.IsValid() { + v = x + } + } + + return nil +} + +func (d *decoder) unmarshalDateTime(value *ast.Node, v reflect.Value) error { + dt, err := parseDateTime(value.Data) + if err != nil { + return err + } + + v.Set(reflect.ValueOf(dt)) + return nil +} + +func (d *decoder) unmarshalLocalDate(value *ast.Node, v reflect.Value) error { + ld, err := parseLocalDate(value.Data) + if err != nil { + return err + } + + if v.Type() == timeType { + cast := ld.AsTime(time.Local) + v.Set(reflect.ValueOf(cast)) + return nil + } + + v.Set(reflect.ValueOf(ld)) + + return nil +} + +func (d *decoder) unmarshalLocalTime(value *ast.Node, v reflect.Value) error { + lt, rest, err := parseLocalTime(value.Data) + if err != nil { + return err + } + + if len(rest) > 0 { + return newDecodeError(rest, "extra characters at the end of a local time") + } + + v.Set(reflect.ValueOf(lt)) + return nil +} + +func (d *decoder) unmarshalLocalDateTime(value *ast.Node, v reflect.Value) error { + ldt, rest, err := parseLocalDateTime(value.Data) + if err != nil { + return err + } + + if len(rest) > 0 { + return newDecodeError(rest, "extra characters at the end of a local date time") + } + + if v.Type() == timeType { + cast := ldt.AsTime(time.Local) + + v.Set(reflect.ValueOf(cast)) + return nil + } + + v.Set(reflect.ValueOf(ldt)) + + return nil +} + +func (d *decoder) unmarshalBool(value *ast.Node, v reflect.Value) error { + b := value.Data[0] == 't' + + switch v.Kind() { + case reflect.Bool: + v.SetBool(b) + case reflect.Interface: + v.Set(reflect.ValueOf(b)) + default: + return newDecodeError(value.Data, "cannot assign boolean to a %t", b) + } + + return nil +} + +func (d *decoder) unmarshalFloat(value *ast.Node, v reflect.Value) error { + f, err := parseFloat(value.Data) + if err != nil { + return err + } + + switch v.Kind() { + case reflect.Float64: + v.SetFloat(f) + case reflect.Float32: + if f > math.MaxFloat32 { + return newDecodeError(value.Data, "number %f does not fit in a float32", f) + } + v.SetFloat(f) + case reflect.Interface: + v.Set(reflect.ValueOf(f)) + default: + return newDecodeError(value.Data, "float cannot be assigned to %s", v.Kind()) + } + + return nil +} + +func (d *decoder) unmarshalInteger(value *ast.Node, v reflect.Value) error { + const ( + maxInt = int64(^uint(0) >> 1) + minInt = -maxInt - 1 + ) 
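+	// ^uint(0) has every bit set, so ^uint(0)>>1 is the largest value a
+	// native int can hold on the current platform (MaxInt32 or MaxInt64),
+	// and -maxInt-1 is the smallest; this keeps the reflect.Int bounds
+	// check below free of build tags or hard-coded 64-bit limits.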
+ + i, err := parseInteger(value.Data) + if err != nil { + return err + } + + var r reflect.Value + + switch v.Kind() { + case reflect.Int64: + v.SetInt(i) + return nil + case reflect.Int32: + if i < math.MinInt32 || i > math.MaxInt32 { + return fmt.Errorf("toml: number %d does not fit in an int32", i) + } + + r = reflect.ValueOf(int32(i)) + case reflect.Int16: + if i < math.MinInt16 || i > math.MaxInt16 { + return fmt.Errorf("toml: number %d does not fit in an int16", i) + } + + r = reflect.ValueOf(int16(i)) + case reflect.Int8: + if i < math.MinInt8 || i > math.MaxInt8 { + return fmt.Errorf("toml: number %d does not fit in an int8", i) + } + + r = reflect.ValueOf(int8(i)) + case reflect.Int: + if i < minInt || i > maxInt { + return fmt.Errorf("toml: number %d does not fit in an int", i) + } + + r = reflect.ValueOf(int(i)) + case reflect.Uint64: + if i < 0 { + return fmt.Errorf("toml: negative number %d does not fit in an uint64", i) + } + + r = reflect.ValueOf(uint64(i)) + case reflect.Uint32: + if i < 0 || i > math.MaxUint32 { + return fmt.Errorf("toml: negative number %d does not fit in an uint32", i) + } + + r = reflect.ValueOf(uint32(i)) + case reflect.Uint16: + if i < 0 || i > math.MaxUint16 { + return fmt.Errorf("toml: negative number %d does not fit in an uint16", i) + } + + r = reflect.ValueOf(uint16(i)) + case reflect.Uint8: + if i < 0 || i > math.MaxUint8 { + return fmt.Errorf("toml: negative number %d does not fit in an uint8", i) + } + + r = reflect.ValueOf(uint8(i)) + case reflect.Uint: + if i < 0 { + return fmt.Errorf("toml: negative number %d does not fit in an uint", i) + } + + r = reflect.ValueOf(uint(i)) + case reflect.Interface: + r = reflect.ValueOf(i) + default: + return d.typeMismatchError("integer", v.Type()) + } + + if !r.Type().AssignableTo(v.Type()) { + r = r.Convert(v.Type()) + } + + v.Set(r) + + return nil +} + +func (d *decoder) unmarshalString(value *ast.Node, v reflect.Value) error { + switch v.Kind() { + case reflect.String: + v.SetString(string(value.Data)) + case reflect.Interface: + v.Set(reflect.ValueOf(string(value.Data))) + default: + return newDecodeError(d.p.Raw(value.Raw), "cannot store TOML string into a Go %s", v.Kind()) + } + + return nil +} + +func (d *decoder) handleKeyValue(expr *ast.Node, v reflect.Value) (reflect.Value, error) { + d.strict.EnterKeyValue(expr) + + v, err := d.handleKeyValueInner(expr.Key(), expr.Value(), v) + if d.skipUntilTable { + d.strict.MissingField(expr) + d.skipUntilTable = false + } + + d.strict.ExitKeyValue(expr) + + return v, err +} + +func (d *decoder) handleKeyValueInner(key ast.Iterator, value *ast.Node, v reflect.Value) (reflect.Value, error) { + if key.Next() { + // Still scoping the key + return d.handleKeyValuePart(key, value, v) + } + // Done scoping the key. + // v is whatever Go value we need to fill. + return reflect.Value{}, d.handleValue(value, v) +} + +func (d *decoder) handleKeyValuePart(key ast.Iterator, value *ast.Node, v reflect.Value) (reflect.Value, error) { + // contains the replacement for v + var rv reflect.Value + + // First, dispatch over v to make sure it is a valid object. + // There is no guarantee over what it could be. 
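+	// The switch below mirrors handleKeyPart: only maps, structs,
+	// interfaces and pointers can appear along a key path; any other kind
+	// falls through to the error in the default case.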
+ switch v.Kind() { + case reflect.Map: + vt := v.Type() + + mk := reflect.ValueOf(string(key.Node().Data)) + mkt := stringType + + keyType := vt.Key() + if !mkt.AssignableTo(keyType) { + if !mkt.ConvertibleTo(keyType) { + return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", mkt, keyType) + } + + mk = mk.Convert(keyType) + } + + // If the map does not exist, create it. + if v.IsNil() { + v = reflect.MakeMap(vt) + rv = v + } + + mv := v.MapIndex(mk) + set := false + if !mv.IsValid() { + set = true + mv = reflect.New(v.Type().Elem()).Elem() + } else { + if key.IsLast() { + var x interface{} + mv = reflect.ValueOf(&x).Elem() + set = true + } + } + + nv, err := d.handleKeyValueInner(key, value, mv) + if err != nil { + return reflect.Value{}, err + } + if nv.IsValid() { + mv = nv + set = true + } + + if set { + v.SetMapIndex(mk, mv) + } + case reflect.Struct: + path, found := structFieldPath(v, string(key.Node().Data)) + if !found { + d.skipUntilTable = true + break + } + + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + t := v.Type() + d.errorContext.Struct = t + d.errorContext.Field = path + + f := v.FieldByIndex(path) + x, err := d.handleKeyValueInner(key, value, f) + if err != nil { + return reflect.Value{}, err + } + + if x.IsValid() { + f.Set(x) + } + d.errorContext.Struct = nil + d.errorContext.Field = nil + case reflect.Interface: + v = v.Elem() + + // Following encoding/json: decoding an object into an + // interface{}, it needs to always hold a + // map[string]interface{}. This is for the types to be + // consistent whether a previous value was set or not. + if !v.IsValid() || v.Type() != mapStringInterfaceType { + v = makeMapStringInterface() + } + + x, err := d.handleKeyValuePart(key, value, v) + if err != nil { + return reflect.Value{}, err + } + if x.IsValid() { + v = x + } + rv = v + case reflect.Ptr: + elem := v.Elem() + if !elem.IsValid() { + ptr := reflect.New(v.Type().Elem()) + v.Set(ptr) + rv = v + elem = ptr.Elem() + } + + elem2, err := d.handleKeyValuePart(key, value, elem) + if err != nil { + return reflect.Value{}, err + } + if elem2.IsValid() { + elem = elem2 + } + v.Elem().Set(elem) + default: + return reflect.Value{}, fmt.Errorf("unhandled kv part: %s", v.Kind()) + } + + return rv, nil +} + +func initAndDereferencePointer(v reflect.Value) reflect.Value { + var elem reflect.Value + if v.IsNil() { + ptr := reflect.New(v.Type().Elem()) + v.Set(ptr) + } + elem = v.Elem() + return elem +} + +type fieldPathsMap = map[string][]int + +var globalFieldPathsCache atomic.Value // map[danger.TypeID]fieldPathsMap + +func structFieldPath(v reflect.Value, name string) ([]int, bool) { + t := v.Type() + + cache, _ := globalFieldPathsCache.Load().(map[danger.TypeID]fieldPathsMap) + fieldPaths, ok := cache[danger.MakeTypeID(t)] + + if !ok { + fieldPaths = map[string][]int{} + + forEachField(t, nil, func(name string, path []int) { + fieldPaths[name] = path + // extra copy for the case-insensitive match + fieldPaths[strings.ToLower(name)] = path + }) + + newCache := make(map[danger.TypeID]fieldPathsMap, len(cache)+1) + newCache[danger.MakeTypeID(t)] = fieldPaths + for k, v := range cache { + newCache[k] = v + } + globalFieldPathsCache.Store(newCache) + } + + path, ok := fieldPaths[name] + if !ok { + path, ok = fieldPaths[strings.ToLower(name)] + } + return path, ok +} + +func forEachField(t reflect.Type, path []int, do func(name string, path []int)) { + n := t.NumField() + for i := 0; i < n; i++ { + f := t.Field(i) + + if 
!f.Anonymous && f.PkgPath != "" { + // only consider exported fields. + continue + } + + fieldPath := append(path, i) + fieldPath = fieldPath[:len(fieldPath):len(fieldPath)] + + if f.Anonymous { + forEachField(f.Type, fieldPath, do) + continue + } + + name := f.Tag.Get("toml") + if name == "-" { + continue + } + + if i := strings.IndexByte(name, ','); i >= 0 { + name = name[:i] + } + if name == "" { + name = f.Name + } + + do(name, fieldPath) + } +} diff --git a/vendor/github.com/pelletier/go-toml/v2/utf8.go b/vendor/github.com/pelletier/go-toml/v2/utf8.go new file mode 100644 index 00000000..d47a4f20 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/utf8.go @@ -0,0 +1,240 @@ +package toml + +import ( + "unicode/utf8" +) + +type utf8Err struct { + Index int + Size int +} + +func (u utf8Err) Zero() bool { + return u.Size == 0 +} + +// Verified that a given string is only made of valid UTF-8 characters allowed +// by the TOML spec: +// +// Any Unicode character may be used except those that must be escaped: +// quotation mark, backslash, and the control characters other than tab (U+0000 +// to U+0008, U+000A to U+001F, U+007F). +// +// It is a copy of the Go 1.17 utf8.Valid implementation, tweaked to exit early +// when a character is not allowed. +// +// The returned utf8Err is Zero() if the string is valid, or contains the byte +// index and size of the invalid character. +// +// quotation mark => already checked +// backslash => already checked +// 0-0x8 => invalid +// 0x9 => tab, ok +// 0xA - 0x1F => invalid +// 0x7F => invalid +func utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { + // Fast path. Check for and skip 8 bytes of ASCII characters per iteration. + offset := 0 + for len(p) >= 8 { + // Combining two 32 bit loads allows the same code to be used + // for 32 and 64 bit platforms. + // The compiler can generate a 32bit load for first32 and second32 + // on many platforms. See test/codegen/memcombine.go. + first32 := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 + second32 := uint32(p[4]) | uint32(p[5])<<8 | uint32(p[6])<<16 | uint32(p[7])<<24 + if (first32|second32)&0x80808080 != 0 { + // Found a non ASCII byte (>= RuneSelf). + break + } + + for i, b := range p[:8] { + if invalidAscii(b) { + err.Index = offset + i + err.Size = 1 + return + } + } + + p = p[8:] + offset += 8 + } + n := len(p) + for i := 0; i < n; { + pi := p[i] + if pi < utf8.RuneSelf { + if invalidAscii(pi) { + err.Index = offset + i + err.Size = 1 + return + } + i++ + continue + } + x := first[pi] + if x == xx { + // Illegal starter byte. + err.Index = offset + i + err.Size = 1 + return + } + size := int(x & 7) + if i+size > n { + // Short or invalid. + err.Index = offset + i + err.Size = n - i + return + } + accept := acceptRanges[x>>4] + if c := p[i+1]; c < accept.lo || accept.hi < c { + err.Index = offset + i + err.Size = 2 + return + } else if size == 2 { + } else if c := p[i+2]; c < locb || hicb < c { + err.Index = offset + i + err.Size = 3 + return + } else if size == 3 { + } else if c := p[i+3]; c < locb || hicb < c { + err.Index = offset + i + err.Size = 4 + return + } + i += size + } + return +} + +// Return the size of the next rune if valid, 0 otherwise. +func utf8ValidNext(p []byte) int { + c := p[0] + + if c < utf8.RuneSelf { + if invalidAscii(c) { + return 0 + } + return 1 + } + + x := first[c] + if x == xx { + // Illegal starter byte. + return 0 + } + size := int(x & 7) + if size > len(p) { + // Short or invalid. 
+ return 0 + } + accept := acceptRanges[x>>4] + if c := p[1]; c < accept.lo || accept.hi < c { + return 0 + } else if size == 2 { + } else if c := p[2]; c < locb || hicb < c { + return 0 + } else if size == 3 { + } else if c := p[3]; c < locb || hicb < c { + return 0 + } + + return size +} + +var invalidAsciiTable = [256]bool{ + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + // 0x09 TAB + // 0x0A LF + 0x0B: true, + 0x0C: true, + // 0x0D CR + 0x0E: true, + 0x0F: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1A: true, + 0x1B: true, + 0x1C: true, + 0x1D: true, + 0x1E: true, + 0x1F: true, + // 0x20 - 0x7E Printable ASCII characters + 0x7F: true, +} + +func invalidAscii(b byte) bool { + return invalidAsciiTable[b] +} + +// acceptRange gives the range of valid values for the second byte in a UTF-8 +// sequence. +type acceptRange struct { + lo uint8 // lowest value for second byte. + hi uint8 // highest value for second byte. +} + +// acceptRanges has size 16 to avoid bounds checks in the code that uses it. +var acceptRanges = [16]acceptRange{ + 0: {locb, hicb}, + 1: {0xA0, hicb}, + 2: {locb, 0x9F}, + 3: {0x90, hicb}, + 4: {locb, 0x8F}, +} + +// first is information about the first byte in a UTF-8 sequence. +var first = [256]uint8{ + // 1 2 3 4 5 6 7 8 9 A B C D E F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F + // 1 2 3 4 5 6 7 8 9 A B C D E F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF + xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF + s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF + s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF + s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF +} + +const ( + // The default lowest and highest continuation byte. + locb = 0b10000000 + hicb = 0b10111111 + + // These names of these constants are chosen to give nice alignment in the + // table below. The first nibble is an index into acceptRanges or F for + // special one-byte cases. The second nibble is the Rune length or the + // Status for the special one-byte case. 
+ xx = 0xF1 // invalid: size 1 + as = 0xF0 // ASCII: size 1 + s1 = 0x02 // accept 0, size 2 + s2 = 0x13 // accept 1, size 3 + s3 = 0x03 // accept 0, size 3 + s4 = 0x23 // accept 2, size 3 + s5 = 0x34 // accept 3, size 4 + s6 = 0x04 // accept 0, size 4 + s7 = 0x44 // accept 4, size 4 +) diff --git a/vendor/github.com/slack-go/slack/apps.go b/vendor/github.com/slack-go/slack/apps.go index 4f60da0a..10d42975 100644 --- a/vendor/github.com/slack-go/slack/apps.go +++ b/vendor/github.com/slack-go/slack/apps.go @@ -44,6 +44,10 @@ func (api *Client) ListEventAuthorizationsContext(ctx context.Context, eventCont } func (api *Client) UninstallApp(clientID, clientSecret string) error { + return api.UninstallAppContext(context.Background(), clientID, clientSecret) +} + +func (api *Client) UninstallAppContext(ctx context.Context, clientID, clientSecret string) error { values := url.Values{ "client_id": {clientID}, "client_secret": {clientSecret}, @@ -51,7 +55,7 @@ func (api *Client) UninstallApp(clientID, clientSecret string) error { response := SlackResponse{} - err := api.getMethod(context.Background(), "apps.uninstall", api.token, values, &response) + err := api.getMethod(ctx, "apps.uninstall", api.token, values, &response) if err != nil { return err } diff --git a/vendor/github.com/slack-go/slack/audit.go b/vendor/github.com/slack-go/slack/audit.go index 041ffd77..a3ea7ebd 100644 --- a/vendor/github.com/slack-go/slack/audit.go +++ b/vendor/github.com/slack-go/slack/audit.go @@ -39,6 +39,16 @@ type AuditEntry struct { UA string `json:"ua"` IPAddress string `json:"ip_address"` } `json:"context"` + Details struct { + NewValue interface{} `json:"new_value"` + PreviousValue interface{} `json:"previous_value"` + MobileOnly bool `json:"mobile_only"` + WebOnly bool `json:"web_only"` + NonSSOOnly bool `json:"non_sso_only"` + ExportType string `json:"export_type"` + ExportStart string `json:"export_start_ts"` + ExportEnd string `json:"export_end_ts"` + } `json:"details"` } type AuditUser struct { diff --git a/vendor/github.com/slack-go/slack/block_element.go b/vendor/github.com/slack-go/slack/block_element.go index 21abb018..643529ff 100644 --- a/vendor/github.com/slack-go/slack/block_element.go +++ b/vendor/github.com/slack-go/slack/block_element.go @@ -167,6 +167,12 @@ func (s *ButtonBlockElement) WithStyle(style Style) *ButtonBlockElement { return s } +// WithConfirm adds a confirmation dialogue to the button object and returns the modified ButtonBlockElement +func (s *ButtonBlockElement) WithConfirm(confirm *ConfirmationBlockObject) *ButtonBlockElement { + s.Confirm = confirm + return s +} + // NewButtonBlockElement returns an instance of a new button element to be used within a block func NewButtonBlockElement(actionID, value string, text *TextBlockObject) *ButtonBlockElement { return &ButtonBlockElement{ diff --git a/vendor/github.com/slack-go/slack/block_object.go b/vendor/github.com/slack-go/slack/block_object.go index 5ced7f92..f70405eb 100644 --- a/vendor/github.com/slack-go/slack/block_object.go +++ b/vendor/github.com/slack-go/slack/block_object.go @@ -187,8 +187,9 @@ func (s ConfirmationBlockObject) validateType() MessageObjectType { } // WithStyle add styling to confirmation object -func (s *ConfirmationBlockObject) WithStyle(style Style) { +func (s *ConfirmationBlockObject) WithStyle(style Style) *ConfirmationBlockObject { s.Style = style + return s } // NewConfirmationBlockObject returns an instance of a new Confirmation Block Object diff --git a/vendor/github.com/slack-go/slack/chat.go 
b/vendor/github.com/slack-go/slack/chat.go index 493b65b6..34848d15 100644 --- a/vendor/github.com/slack-go/slack/chat.go +++ b/vendor/github.com/slack-go/slack/chat.go @@ -86,12 +86,7 @@ func NewPostMessageParameters() PostMessageParameters { // DeleteMessage deletes a message in a channel func (api *Client) DeleteMessage(channel, messageTimestamp string) (string, string, error) { - respChannel, respTimestamp, _, err := api.SendMessageContext( - context.Background(), - channel, - MsgOptionDelete(messageTimestamp), - ) - return respChannel, respTimestamp, err + return api.DeleteMessageContext(context.Background(), channel, messageTimestamp) } // DeleteMessageContext deletes a message in a channel with a custom context @@ -108,8 +103,15 @@ func (api *Client) DeleteMessageContext(ctx context.Context, channel, messageTim // Message is escaped by default according to https://api.slack.com/docs/formatting // Use http://davestevens.github.io/slack-message-builder/ to help crafting your message. func (api *Client) ScheduleMessage(channelID, postAt string, options ...MsgOption) (string, string, error) { + return api.ScheduleMessageContext(context.Background(), channelID, postAt, options...) +} + +// ScheduleMessageContext sends a message to a channel with a custom context +// +// For more details, see ScheduleMessage documentation. +func (api *Client) ScheduleMessageContext(ctx context.Context, channelID, postAt string, options ...MsgOption) (string, string, error) { respChannel, respTimestamp, _, err := api.SendMessageContext( - context.Background(), + ctx, channelID, MsgOptionSchedule(postAt), MsgOptionCompose(options...), @@ -121,13 +123,7 @@ func (api *Client) ScheduleMessage(channelID, postAt string, options ...MsgOptio // Message is escaped by default according to https://api.slack.com/docs/formatting // Use http://davestevens.github.io/slack-message-builder/ to help crafting your message. func (api *Client) PostMessage(channelID string, options ...MsgOption) (string, string, error) { - respChannel, respTimestamp, _, err := api.SendMessageContext( - context.Background(), - channelID, - MsgOptionPost(), - MsgOptionCompose(options...), - ) - return respChannel, respTimestamp, err + return api.PostMessageContext(context.Background(), channelID, options...) } // PostMessageContext sends a message to a channel with a custom context @@ -146,12 +142,7 @@ func (api *Client) PostMessageContext(ctx context.Context, channelID string, opt // Message is escaped by default according to https://api.slack.com/docs/formatting // Use http://davestevens.github.io/slack-message-builder/ to help crafting your message. func (api *Client) PostEphemeral(channelID, userID string, options ...MsgOption) (string, error) { - return api.PostEphemeralContext( - context.Background(), - channelID, - userID, - options..., - ) + return api.PostEphemeralContext(context.Background(), channelID, userID, options...) } // PostEphemeralContext sends an ephemeal message to a user in a channel with a custom context @@ -168,12 +159,7 @@ func (api *Client) PostEphemeralContext(ctx context.Context, channelID, userID s // UpdateMessage updates a message in a channel func (api *Client) UpdateMessage(channelID, timestamp string, options ...MsgOption) (string, string, string, error) { - return api.SendMessageContext( - context.Background(), - channelID, - MsgOptionUpdate(timestamp), - MsgOptionCompose(options...), - ) + return api.UpdateMessageContext(context.Background(), channelID, timestamp, options...) 
} // UpdateMessageContext updates a message in a channel @@ -225,7 +211,7 @@ func (api *Client) SendMessageContext(ctx context.Context, channelID string, opt response chatResponseFull ) - if req, parser, err = buildSender(api.endpoint, options...).BuildRequest(api.token, channelID); err != nil { + if req, parser, err = buildSender(api.endpoint, options...).BuildRequestContext(ctx, api.token, channelID); err != nil { return "", "", "", err } @@ -306,6 +292,10 @@ type sendConfig struct { } func (t sendConfig) BuildRequest(token, channelID string) (req *http.Request, _ func(*chatResponseFull) responseParser, err error) { + return t.BuildRequestContext(context.Background(), token, channelID) +} + +func (t sendConfig) BuildRequestContext(ctx context.Context, token, channelID string) (req *http.Request, _ func(*chatResponseFull) responseParser, err error) { if t, err = applyMsgOptions(token, channelID, t.apiurl, t.options...); err != nil { return nil, nil, err } @@ -320,9 +310,9 @@ func (t sendConfig) BuildRequest(token, channelID string) (req *http.Request, _ responseType: t.responseType, replaceOriginal: t.replaceOriginal, deleteOriginal: t.deleteOriginal, - }.BuildRequest() + }.BuildRequestContext(ctx) default: - return formSender{endpoint: t.endpoint, values: t.values}.BuildRequest() + return formSender{endpoint: t.endpoint, values: t.values}.BuildRequestContext(ctx) } } @@ -332,7 +322,11 @@ type formSender struct { } func (t formSender) BuildRequest() (*http.Request, func(*chatResponseFull) responseParser, error) { - req, err := formReq(t.endpoint, t.values) + return t.BuildRequestContext(context.Background()) +} + +func (t formSender) BuildRequestContext(ctx context.Context) (*http.Request, func(*chatResponseFull) responseParser, error) { + req, err := formReq(ctx, t.endpoint, t.values) return req, func(resp *chatResponseFull) responseParser { return newJSONParser(resp) }, err @@ -349,7 +343,11 @@ type responseURLSender struct { } func (t responseURLSender) BuildRequest() (*http.Request, func(*chatResponseFull) responseParser, error) { - req, err := jsonReq(t.endpoint, Msg{ + return t.BuildRequestContext(context.Background()) +} + +func (t responseURLSender) BuildRequestContext(ctx context.Context) (*http.Request, func(*chatResponseFull) responseParser, error) { + req, err := jsonReq(ctx, t.endpoint, Msg{ Text: t.values.Get("text"), Timestamp: t.values.Get("ts"), Attachments: t.attachments, diff --git a/vendor/github.com/slack-go/slack/files.go b/vendor/github.com/slack-go/slack/files.go index 00c255bc..e7e71c49 100644 --- a/vendor/github.com/slack-go/slack/files.go +++ b/vendor/github.com/slack-go/slack/files.go @@ -202,7 +202,14 @@ func (api *Client) GetFileInfoContext(ctx context.Context, fileID string, count, // GetFile retreives a given file from its private download URL func (api *Client) GetFile(downloadURL string, writer io.Writer) error { - return downloadFile(api.httpclient, api.token, downloadURL, writer, api) + return api.GetFileContext(context.Background(), downloadURL, writer) +} + +// GetFileContext retreives a given file from its private download URL with a custom context +// +// For more details, see GetFile documentation. 
+func (api *Client) GetFileContext(ctx context.Context, downloadURL string, writer io.Writer) error { + return downloadFile(ctx, api.httpclient, api.token, downloadURL, writer, api) } // GetFiles retrieves all files according to the parameters given @@ -210,40 +217,6 @@ func (api *Client) GetFiles(params GetFilesParameters) ([]File, *Paging, error) return api.GetFilesContext(context.Background(), params) } -// ListFiles retrieves all files according to the parameters given. Uses cursor based pagination. -func (api *Client) ListFiles(params ListFilesParameters) ([]File, *ListFilesParameters, error) { - return api.ListFilesContext(context.Background(), params) -} - -// ListFilesContext retrieves all files according to the parameters given with a custom context. Uses cursor based pagination. -func (api *Client) ListFilesContext(ctx context.Context, params ListFilesParameters) ([]File, *ListFilesParameters, error) { - values := url.Values{ - "token": {api.token}, - } - - if params.User != DEFAULT_FILES_USER { - values.Add("user", params.User) - } - if params.Channel != DEFAULT_FILES_CHANNEL { - values.Add("channel", params.Channel) - } - if params.Limit != DEFAULT_FILES_COUNT { - values.Add("limit", strconv.Itoa(params.Limit)) - } - if params.Cursor != "" { - values.Add("cursor", params.Cursor) - } - - response, err := api.fileRequest(ctx, "files.list", values) - if err != nil { - return nil, nil, err - } - - params.Cursor = response.Metadata.Cursor - - return response.Files, ¶ms, nil -} - // GetFilesContext retrieves all files according to the parameters given with a custom context func (api *Client) GetFilesContext(ctx context.Context, params GetFilesParameters) ([]File, *Paging, error) { values := url.Values{ @@ -281,6 +254,42 @@ func (api *Client) GetFilesContext(ctx context.Context, params GetFilesParameter return response.Files, &response.Paging, nil } +// ListFiles retrieves all files according to the parameters given. Uses cursor based pagination. +func (api *Client) ListFiles(params ListFilesParameters) ([]File, *ListFilesParameters, error) { + return api.ListFilesContext(context.Background(), params) +} + +// ListFilesContext retrieves all files according to the parameters given with a custom context. +// +// For more details, see ListFiles documentation. 
+func (api *Client) ListFilesContext(ctx context.Context, params ListFilesParameters) ([]File, *ListFilesParameters, error) { + values := url.Values{ + "token": {api.token}, + } + + if params.User != DEFAULT_FILES_USER { + values.Add("user", params.User) + } + if params.Channel != DEFAULT_FILES_CHANNEL { + values.Add("channel", params.Channel) + } + if params.Limit != DEFAULT_FILES_COUNT { + values.Add("limit", strconv.Itoa(params.Limit)) + } + if params.Cursor != "" { + values.Add("cursor", params.Cursor) + } + + response, err := api.fileRequest(ctx, "files.list", values) + if err != nil { + return nil, nil, err + } + + params.Cursor = response.Metadata.Cursor + + return response.Files, ¶ms, nil +} + // UploadFile uploads a file func (api *Client) UploadFile(params FileUploadParameters) (file *File, err error) { return api.UploadFileContext(context.Background(), params) diff --git a/vendor/github.com/slack-go/slack/info.go b/vendor/github.com/slack-go/slack/info.go index 16fa667b..fde2bc98 100644 --- a/vendor/github.com/slack-go/slack/info.go +++ b/vendor/github.com/slack-go/slack/info.go @@ -321,9 +321,13 @@ type UserPrefs struct { } func (api *Client) GetUserPrefs() (*UserPrefsCarrier, error) { + return api.GetUserPrefsContext(context.Background()) +} + +func (api *Client) GetUserPrefsContext(ctx context.Context) (*UserPrefsCarrier, error) { response := UserPrefsCarrier{} - err := api.getMethod(context.Background(), "users.prefs.get", api.token, url.Values{}, &response) + err := api.getMethod(ctx, "users.prefs.get", api.token, url.Values{}, &response) if err != nil { return nil, err } diff --git a/vendor/github.com/slack-go/slack/interactions.go b/vendor/github.com/slack-go/slack/interactions.go index 9a519e27..e362caa8 100644 --- a/vendor/github.com/slack-go/slack/interactions.go +++ b/vendor/github.com/slack-go/slack/interactions.go @@ -28,32 +28,34 @@ const ( InteractionTypeViewSubmission = InteractionType("view_submission") InteractionTypeViewClosed = InteractionType("view_closed") InteractionTypeShortcut = InteractionType("shortcut") + InteractionTypeWorkflowStepEdit = InteractionType("workflow_step_edit") ) // InteractionCallback is sent from slack when a user interactions with a button or dialog. 
type InteractionCallback struct { - Type InteractionType `json:"type"` - Token string `json:"token"` - CallbackID string `json:"callback_id"` - ResponseURL string `json:"response_url"` - TriggerID string `json:"trigger_id"` - ActionTs string `json:"action_ts"` - Team Team `json:"team"` - Channel Channel `json:"channel"` - User User `json:"user"` - OriginalMessage Message `json:"original_message"` - Message Message `json:"message"` - Name string `json:"name"` - Value string `json:"value"` - MessageTs string `json:"message_ts"` - AttachmentID string `json:"attachment_id"` - ActionCallback ActionCallbacks `json:"actions"` - View View `json:"view"` - ActionID string `json:"action_id"` - APIAppID string `json:"api_app_id"` - BlockID string `json:"block_id"` - Container Container `json:"container"` - Enterprise Enterprise `json:"enterprise"` + Type InteractionType `json:"type"` + Token string `json:"token"` + CallbackID string `json:"callback_id"` + ResponseURL string `json:"response_url"` + TriggerID string `json:"trigger_id"` + ActionTs string `json:"action_ts"` + Team Team `json:"team"` + Channel Channel `json:"channel"` + User User `json:"user"` + OriginalMessage Message `json:"original_message"` + Message Message `json:"message"` + Name string `json:"name"` + Value string `json:"value"` + MessageTs string `json:"message_ts"` + AttachmentID string `json:"attachment_id"` + ActionCallback ActionCallbacks `json:"actions"` + View View `json:"view"` + ActionID string `json:"action_id"` + APIAppID string `json:"api_app_id"` + BlockID string `json:"block_id"` + Container Container `json:"container"` + Enterprise Enterprise `json:"enterprise"` + WorkflowStep InteractionWorkflowStep `json:"workflow_step"` DialogSubmissionCallback ViewSubmissionCallback ViewClosedCallback @@ -134,6 +136,14 @@ type Enterprise struct { Name string `json:"name"` } +type InteractionWorkflowStep struct { + WorkflowStepEditID string `json:"workflow_step_edit_id,omitempty"` + WorkflowID string `json:"workflow_id"` + StepID string `json:"step_id"` + Inputs *WorkflowStepInputs `json:"inputs,omitempty"` + Outputs *[]WorkflowStepOutput `json:"outputs,omitempty"` +} + // ActionCallback is a convenience struct defined to allow dynamic unmarshalling of // the "actions" value in Slack's JSON response, which varies depending on block type type ActionCallbacks struct { diff --git a/vendor/github.com/slack-go/slack/messageID.go b/vendor/github.com/slack-go/slack/messageID.go index a17472b4..689ee80d 100644 --- a/vendor/github.com/slack-go/slack/messageID.go +++ b/vendor/github.com/slack-go/slack/messageID.go @@ -1,6 +1,6 @@ package slack -import "sync" +import "sync/atomic" // IDGenerator provides an interface for generating integer ID values. type IDGenerator interface { @@ -11,20 +11,20 @@ type IDGenerator interface { // concurrent use by multiple goroutines. func NewSafeID(startID int) IDGenerator { return &safeID{ - nextID: startID, - mutex: &sync.Mutex{}, + nextID: int64(startID), } } type safeID struct { - nextID int - mutex *sync.Mutex + nextID int64 } +// make sure safeID implements the IDGenerator interface. +var _ IDGenerator = (*safeID)(nil) + +// Next implements IDGenerator.Next. 
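The messageID.go rewrite in this hunk swaps the mutex-guarded counter for sync/atomic. One behavioural nuance worth noting: atomic.AddInt64 returns the value after the increment, so the first call to Next() now yields startID+1, whereas the mutex-based version returned startID first. A minimal sketch of concurrent use (the 100 seed is arbitrary):

```go
package main

import (
	"fmt"
	"sync"

	"github.com/slack-go/slack"
)

func main() {
	// IDGenerator backed by sync/atomic in this version of slack-go.
	gen := slack.NewSafeID(100)

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Safe for concurrent use; the first value handed out is 101
			// (startID+1) with the atomic implementation.
			fmt.Println(gen.Next())
		}()
	}
	wg.Wait()
}
```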
func (s *safeID) Next() int { - s.mutex.Lock() - defer s.mutex.Unlock() - id := s.nextID - s.nextID++ - return id + id := atomic.AddInt64(&s.nextID, 1) + + return int(id) } diff --git a/vendor/github.com/slack-go/slack/misc.go b/vendor/github.com/slack-go/slack/misc.go index 5272e7c4..804724d7 100644 --- a/vendor/github.com/slack-go/slack/misc.go +++ b/vendor/github.com/slack-go/slack/misc.go @@ -66,29 +66,27 @@ func (e *RateLimitedError) Retryable() bool { } func fileUploadReq(ctx context.Context, path string, values url.Values, r io.Reader) (*http.Request, error) { - req, err := http.NewRequest("POST", path, r) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, path, r) if err != nil { return nil, err } - req = req.WithContext(ctx) - req.URL.RawQuery = (values).Encode() + req.URL.RawQuery = values.Encode() return req, nil } -func downloadFile(client httpClient, token string, downloadURL string, writer io.Writer, d Debug) error { +func downloadFile(ctx context.Context, client httpClient, token string, downloadURL string, writer io.Writer, d Debug) error { if downloadURL == "" { return fmt.Errorf("received empty download URL") } - req, err := http.NewRequest("GET", downloadURL, &bytes.Buffer{}) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, downloadURL, &bytes.Buffer{}) if err != nil { return err } var bearer = "Bearer " + token req.Header.Add("Authorization", bearer) - req.WithContext(context.Background()) resp, err := client.Do(req) if err != nil { @@ -107,8 +105,8 @@ func downloadFile(client httpClient, token string, downloadURL string, writer io return err } -func formReq(endpoint string, values url.Values) (req *http.Request, err error) { - if req, err = http.NewRequest("POST", endpoint, strings.NewReader(values.Encode())); err != nil { +func formReq(ctx context.Context, endpoint string, values url.Values) (req *http.Request, err error) { + if req, err = http.NewRequestWithContext(ctx, http.MethodPost, endpoint, strings.NewReader(values.Encode())); err != nil { return nil, err } @@ -116,13 +114,13 @@ func formReq(endpoint string, values url.Values) (req *http.Request, err error) return req, nil } -func jsonReq(endpoint string, body interface{}) (req *http.Request, err error) { +func jsonReq(ctx context.Context, endpoint string, body interface{}) (req *http.Request, err error) { buffer := bytes.NewBuffer([]byte{}) if err = json.NewEncoder(buffer).Encode(body); err != nil { return nil, err } - if req, err = http.NewRequest("POST", endpoint, buffer); err != nil { + if req, err = http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer); err != nil { return nil, err } @@ -184,7 +182,6 @@ func postWithMultipartResponse(ctx context.Context, client httpClient, path, nam } req.Header.Add("Content-Type", wr.FormDataContentType()) req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) - req = req.WithContext(ctx) resp, err := client.Do(req) if err != nil { @@ -206,7 +203,6 @@ func postWithMultipartResponse(ctx context.Context, client httpClient, path, nam } func doPost(ctx context.Context, client httpClient, req *http.Request, parser responseParser, d Debug) error { - req = req.WithContext(ctx) resp, err := client.Do(req) if err != nil { return err @@ -224,7 +220,7 @@ func doPost(ctx context.Context, client httpClient, req *http.Request, parser re // post JSON. 
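The misc.go helpers in this hunk now build requests with http.NewRequestWithContext instead of attaching a context afterwards via req.WithContext, so deadlines and cancellation are bound to the request from construction and the redundant req = req.WithContext(ctx) calls in doPost and postWithMultipartResponse can be dropped. A small standalone illustration of the pattern (example.com is a placeholder URL):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// The context travels with the request from the moment it is built;
	// client.Do aborts with a context error once the deadline passes.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://example.com", nil)
	if err != nil {
		fmt.Println("build request:", err)
		return
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```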
func postJSON(ctx context.Context, client httpClient, endpoint, token string, json []byte, intf interface{}, d Debug) error { reqBody := bytes.NewBuffer(json) - req, err := http.NewRequest("POST", endpoint, reqBody) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, reqBody) if err != nil { return err } @@ -237,7 +233,7 @@ func postJSON(ctx context.Context, client httpClient, endpoint, token string, js // post a url encoded form. func postForm(ctx context.Context, client httpClient, endpoint string, values url.Values, intf interface{}, d Debug) error { reqBody := strings.NewReader(values.Encode()) - req, err := http.NewRequest("POST", endpoint, reqBody) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, reqBody) if err != nil { return err } @@ -246,7 +242,7 @@ func postForm(ctx context.Context, client httpClient, endpoint string, values ur } func getResource(ctx context.Context, client httpClient, endpoint, token string, values url.Values, intf interface{}, d Debug) error { - req, err := http.NewRequest("GET", endpoint, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil) if err != nil { return err } diff --git a/vendor/github.com/slack-go/slack/reminders.go b/vendor/github.com/slack-go/slack/reminders.go index ae1da866..53d67c03 100644 --- a/vendor/github.com/slack-go/slack/reminders.go +++ b/vendor/github.com/slack-go/slack/reminders.go @@ -52,10 +52,17 @@ func (api *Client) doReminders(ctx context.Context, path string, values url.Valu // // See https://api.slack.com/methods/reminders.list func (api *Client) ListReminders() ([]*Reminder, error) { + return api.ListRemindersContext(context.Background()) +} + +// ListRemindersContext lists all the reminders created by or for the authenticated user with a custom context +// +// For more details, see ListReminders documentation. +func (api *Client) ListRemindersContext(ctx context.Context) ([]*Reminder, error) { values := url.Values{ "token": {api.token}, } - return api.doReminders(context.Background(), "reminders.list", values) + return api.doReminders(ctx, "reminders.list", values) } // AddChannelReminder adds a reminder for a channel. @@ -64,13 +71,20 @@ func (api *Client) ListReminders() ([]*Reminder, error) { // reminders on a channel is currently undocumented but has been tested to // work) func (api *Client) AddChannelReminder(channelID, text, time string) (*Reminder, error) { + return api.AddChannelReminderContext(context.Background(), channelID, text, time) +} + +// AddChannelReminderContext adds a reminder for a channel with a custom context +// +// For more details, see AddChannelReminder documentation. +func (api *Client) AddChannelReminderContext(ctx context.Context, channelID, text, time string) (*Reminder, error) { values := url.Values{ "token": {api.token}, "text": {text}, "time": {time}, "channel": {channelID}, } - return api.doReminder(context.Background(), "reminders.add", values) + return api.doReminder(ctx, "reminders.add", values) } // AddUserReminder adds a reminder for a user. 
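With the new *Context variants in reminders.go, callers can put a deadline on Slack reminder calls instead of relying on the implicit context.Background(). A short usage sketch; the token, channel ID and reminder text are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/slack-go/slack"
)

func main() {
	api := slack.New("xoxb-your-token") // placeholder token

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Aborts if Slack does not respond within the timeout.
	reminder, err := api.AddChannelReminderContext(ctx, "C0123456789", "Stand-up!", "in 15 minutes")
	if err != nil {
		fmt.Println("add reminder:", err)
		return
	}
	fmt.Println("created reminder", reminder.ID)

	reminders, err := api.ListRemindersContext(ctx)
	if err != nil {
		fmt.Println("list reminders:", err)
		return
	}
	fmt.Println("reminders for this user:", len(reminders))
}
```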
@@ -79,25 +93,39 @@ func (api *Client) AddChannelReminder(channelID, text, time string) (*Reminder, // reminders on a channel is currently undocumented but has been tested to // work) func (api *Client) AddUserReminder(userID, text, time string) (*Reminder, error) { + return api.AddUserReminderContext(context.Background(), userID, text, time) +} + +// AddUserReminderContext adds a reminder for a user with a custom context +// +// For more details, see AddUserReminder documentation. +func (api *Client) AddUserReminderContext(ctx context.Context, userID, text, time string) (*Reminder, error) { values := url.Values{ "token": {api.token}, "text": {text}, "time": {time}, "user": {userID}, } - return api.doReminder(context.Background(), "reminders.add", values) + return api.doReminder(ctx, "reminders.add", values) } // DeleteReminder deletes an existing reminder. // // See https://api.slack.com/methods/reminders.delete func (api *Client) DeleteReminder(id string) error { + return api.DeleteReminderContext(context.Background(), id) +} + +// DeleteReminderContext deletes an existing reminder with a custom context +// +// For more details, see DeleteReminder documentation. +func (api *Client) DeleteReminderContext(ctx context.Context, id string) error { values := url.Values{ "token": {api.token}, "reminder": {id}, } response := &SlackResponse{} - if err := api.postMethod(context.Background(), "reminders.delete", values, response); err != nil { + if err := api.postMethod(ctx, "reminders.delete", values, response); err != nil { return err } return response.Err() diff --git a/vendor/github.com/slack-go/slack/slackutilsx/slackutilsx.go b/vendor/github.com/slack-go/slack/slackutilsx/slackutilsx.go index 1f7b2b8c..d6c9c07e 100644 --- a/vendor/github.com/slack-go/slack/slackutilsx/slackutilsx.go +++ b/vendor/github.com/slack-go/slack/slackutilsx/slackutilsx.go @@ -50,10 +50,12 @@ func DetectChannelType(channelID string) ChannelType { } } +// initialize replacer only once (if needed) +var escapeReplacer = strings.NewReplacer("&", "&", "<", "<", ">", ">") + // EscapeMessage text func EscapeMessage(message string) string { - replacer := strings.NewReplacer("&", "&", "<", "<", ">", ">") - return replacer.Replace(message) + return escapeReplacer.Replace(message) } // Retryable errors return true. 
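The slackutilsx change hoists the strings.Replacer used by EscapeMessage into a package-level variable, so it is built once instead of on every call; strings.Replacer is safe for concurrent use by multiple goroutines, so sharing a single instance is fine and callers see no API change:

```go
package main

import (
	"fmt"

	"github.com/slack-go/slack/slackutilsx"
)

func main() {
	// EscapeMessage swaps Slack's reserved characters (&, < and >) for their
	// escaped equivalents using the shared package-level replacer.
	fmt.Println(slackutilsx.EscapeMessage("tags <b> & friends"))
}
```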
diff --git a/vendor/github.com/slack-go/slack/views.go b/vendor/github.com/slack-go/slack/views.go index e3ee8815..a3a1bd05 100644 --- a/vendor/github.com/slack-go/slack/views.go +++ b/vendor/github.com/slack-go/slack/views.go @@ -98,7 +98,7 @@ func NewErrorsViewSubmissionResponse(errors map[string]string) *ViewSubmissionRe type ModalViewRequest struct { Type ViewType `json:"type"` - Title *TextBlockObject `json:"title"` + Title *TextBlockObject `json:"title,omitempty"` Blocks Blocks `json:"blocks"` Close *TextBlockObject `json:"close,omitempty"` Submit *TextBlockObject `json:"submit,omitempty"` diff --git a/vendor/github.com/slack-go/slack/webhooks.go b/vendor/github.com/slack-go/slack/webhooks.go index 97346e1c..15097f03 100644 --- a/vendor/github.com/slack-go/slack/webhooks.go +++ b/vendor/github.com/slack-go/slack/webhooks.go @@ -1,7 +1,10 @@ package slack import ( + "bytes" "context" + "encoding/json" + "fmt" "net/http" ) @@ -31,3 +34,24 @@ func PostWebhookContext(ctx context.Context, url string, msg *WebhookMessage) er func PostWebhookCustomHTTP(url string, httpClient *http.Client, msg *WebhookMessage) error { return PostWebhookCustomHTTPContext(context.Background(), url, httpClient, msg) } + +func PostWebhookCustomHTTPContext(ctx context.Context, url string, httpClient *http.Client, msg *WebhookMessage) error { + raw, err := json.Marshal(msg) + if err != nil { + return fmt.Errorf("marshal failed: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(raw)) + if err != nil { + return fmt.Errorf("failed new request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + + resp, err := httpClient.Do(req) + if err != nil { + return fmt.Errorf("failed to post webhook: %w", err) + } + defer resp.Body.Close() + + return checkStatusCode(resp, discard{}) +} diff --git a/vendor/github.com/slack-go/slack/webhooks_go112.go b/vendor/github.com/slack-go/slack/webhooks_go112.go deleted file mode 100644 index 0eb539ac..00000000 --- a/vendor/github.com/slack-go/slack/webhooks_go112.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build !go1.13 -// +build !go1.13 - -package slack - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "net/http" -) - -func PostWebhookCustomHTTPContext(ctx context.Context, url string, httpClient *http.Client, msg *WebhookMessage) error { - raw, err := json.Marshal(msg) - if err != nil { - return fmt.Errorf("marshal failed: %v", err) - } - - req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(raw)) - if err != nil { - return fmt.Errorf("failed new request: %v", err) - } - req = req.WithContext(ctx) - req.Header.Set("Content-Type", "application/json") - - resp, err := httpClient.Do(req) - if err != nil { - return fmt.Errorf("failed to post webhook: %v", err) - } - defer resp.Body.Close() - - return checkStatusCode(resp, discard{}) -} diff --git a/vendor/github.com/slack-go/slack/webhooks_go113.go b/vendor/github.com/slack-go/slack/webhooks_go113.go deleted file mode 100644 index 021eac01..00000000 --- a/vendor/github.com/slack-go/slack/webhooks_go113.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build go1.13 -// +build go1.13 - -package slack - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "net/http" -) - -func PostWebhookCustomHTTPContext(ctx context.Context, url string, httpClient *http.Client, msg *WebhookMessage) error { - raw, err := json.Marshal(msg) - if err != nil { - return fmt.Errorf("marshal failed: %w", err) - } - - req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, 
bytes.NewReader(raw)) - if err != nil { - return fmt.Errorf("failed new request: %w", err) - } - req.Header.Set("Content-Type", "application/json") - - resp, err := httpClient.Do(req) - if err != nil { - return fmt.Errorf("failed to post webhook: %w", err) - } - defer resp.Body.Close() - - return checkStatusCode(resp, discard{}) -} diff --git a/vendor/github.com/slack-go/slack/websocket_managed_conn.go b/vendor/github.com/slack-go/slack/websocket_managed_conn.go index 5555c316..92536171 100644 --- a/vendor/github.com/slack-go/slack/websocket_managed_conn.go +++ b/vendor/github.com/slack-go/slack/websocket_managed_conn.go @@ -9,11 +9,11 @@ import ( "reflect" "time" - "github.com/slack-go/slack/internal/backoff" - "github.com/slack-go/slack/internal/misc" - "github.com/gorilla/websocket" + + "github.com/slack-go/slack/internal/backoff" "github.com/slack-go/slack/internal/errorsx" + "github.com/slack-go/slack/internal/misc" "github.com/slack-go/slack/internal/timex" ) diff --git a/vendor/github.com/slack-go/slack/workflow_step.go b/vendor/github.com/slack-go/slack/workflow_step.go new file mode 100644 index 00000000..bcc892c5 --- /dev/null +++ b/vendor/github.com/slack-go/slack/workflow_step.go @@ -0,0 +1,98 @@ +package slack + +import ( + "context" + "encoding/json" +) + +const VTWorkflowStep ViewType = "workflow_step" + +type ( + ConfigurationModalRequest struct { + ModalViewRequest + } + + WorkflowStepCompleteResponse struct { + WorkflowStepEditID string `json:"workflow_step_edit_id"` + Inputs *WorkflowStepInputs `json:"inputs,omitempty"` + Outputs *[]WorkflowStepOutput `json:"outputs,omitempty"` + } + + WorkflowStepInputElement struct { + Value string `json:"value"` + SkipVariableReplacement bool `json:"skip_variable_replacement"` + } + + WorkflowStepInputs map[string]WorkflowStepInputElement + + WorkflowStepOutput struct { + Name string `json:"name"` + Type string `json:"type"` + Label string `json:"label"` + } +) + +func NewConfigurationModalRequest(blocks Blocks, privateMetaData string, externalID string) *ConfigurationModalRequest { + return &ConfigurationModalRequest{ + ModalViewRequest{ + Type: VTWorkflowStep, + Title: nil, // slack configuration modal must not have a title! 
+ Blocks: blocks, + PrivateMetadata: privateMetaData, + ExternalID: externalID, + }, + } +} + +func (api *Client) SaveWorkflowStepConfiguration(workflowStepEditID string, inputs *WorkflowStepInputs, outputs *[]WorkflowStepOutput) error { + return api.SaveWorkflowStepConfigurationContext(context.Background(), workflowStepEditID, inputs, outputs) +} + +func (api *Client) SaveWorkflowStepConfigurationContext(ctx context.Context, workflowStepEditID string, inputs *WorkflowStepInputs, outputs *[]WorkflowStepOutput) error { + // More information: https://api.slack.com/methods/workflows.updateStep + wscr := WorkflowStepCompleteResponse{ + WorkflowStepEditID: workflowStepEditID, + Inputs: inputs, + Outputs: outputs, + } + + endpoint := api.endpoint + "workflows.updateStep" + jsonData, err := json.Marshal(wscr) + if err != nil { + return err + } + + response := &SlackResponse{} + if err := postJSON(ctx, api.httpclient, endpoint, api.token, jsonData, response, api); err != nil { + return err + } + + if !response.Ok { + return response.Err() + } + + return nil +} + +func GetInitialOptionFromWorkflowStepInput(selection *SelectBlockElement, inputs *WorkflowStepInputs, options []*OptionBlockObject) (*OptionBlockObject, bool) { + if len(*inputs) == 0 { + return &OptionBlockObject{}, false + } + if len(options) == 0 { + return &OptionBlockObject{}, false + } + + if val, ok := (*inputs)[selection.ActionID]; ok { + if val.SkipVariableReplacement { + return &OptionBlockObject{}, false + } + + for _, option := range options { + if option.Value == val.Value { + return option, true + } + } + } + + return &OptionBlockObject{}, false +} diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md index fb8eaaf8..cab257f5 100644 --- a/vendor/github.com/spf13/afero/README.md +++ b/vendor/github.com/spf13/afero/README.md @@ -79,11 +79,11 @@ would. So if my application before had: ```go -os.Open('/tmp/foo') +os.Open("/tmp/foo") ``` We would replace it with: ```go -AppFs.Open('/tmp/foo') +AppFs.Open("/tmp/foo") ``` `AppFs` being the variable we defined above. @@ -259,6 +259,18 @@ system using InMemoryFile. Afero has experimental support for secure file transfer protocol (sftp). Which can be used to perform file operations over a encrypted channel. +### GCSFs + +Afero has experimental support for Google Cloud Storage (GCS). You can either set the +`GOOGLE_APPLICATION_CREDENTIALS_JSON` env variable to your JSON credentials or use `opts` in +`NewGcsFS` to configure access to your GCS bucket. + +Some known limitations of the existing implementation: +* No Chmod support - The GCS ACL could probably be mapped to *nix style permissions but that would add another level of complexity and is ignored in this version. +* No Chtimes support - Could be simulated with attributes (gcs a/m-times are set implicitly) but that's is left for another version. +* Not thread safe - Also assumes all file operations are done through the same instance of the GcsFs. File operations between different GcsFs instances are not guaranteed to be consistent. 
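Once constructed, the GCS backend is just another afero.Fs, so the usual afero helpers apply. A rough sketch only: the gcsfs import path, the exact NewGcsFS signature and opts, and using the bucket name as the leading path element are assumptions based on the README text above, not verified against this vendored version:

```go
package main

import (
	"context"
	"fmt"

	"github.com/spf13/afero"
	"github.com/spf13/afero/gcsfs" // assumed location of the experimental GCS backend
)

func main() {
	// Assumes GOOGLE_APPLICATION_CREDENTIALS_JSON is set; the constructor
	// shape shown here is an assumption, not the documented API.
	fs, err := gcsfs.NewGcsFS(context.Background())
	if err != nil {
		fmt.Println("init gcs fs:", err)
		return
	}

	// From here on it behaves like any other afero.Fs.
	if err := afero.WriteFile(fs, "my-bucket/hello.txt", []byte("hi"), 0o644); err != nil {
		fmt.Println("write:", err)
		return
	}
	data, err := afero.ReadFile(fs, "my-bucket/hello.txt")
	if err != nil {
		fmt.Println("read:", err)
		return
	}
	fmt.Println(string(data))
}
```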
+ + ## Filtering Backends ### BasePathFs diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go index 71471aa2..017d344f 100644 --- a/vendor/github.com/spf13/afero/cacheOnReadFs.go +++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go @@ -75,6 +75,10 @@ func (u *CacheOnReadFs) copyToLayer(name string) error { return copyToLayer(u.base, u.layer, name) } +func (u *CacheOnReadFs) copyFileToLayer(name string, flag int, perm os.FileMode) error { + return copyFileToLayer(u.base, u.layer, name, flag, perm) +} + func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { st, _, err := u.cacheStatus(name) if err != nil { @@ -212,7 +216,7 @@ func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, switch st { case cacheLocal, cacheHit: default: - if err := u.copyToLayer(name); err != nil { + if err := u.copyFileToLayer(name, flag, perm); err != nil { return nil, err } } diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go index 5a20730c..5ef8b6a3 100644 --- a/vendor/github.com/spf13/afero/mem/file.go +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -71,7 +71,7 @@ func CreateFile(name string) *FileData { } func CreateDir(name string) *FileData { - return &FileData{name: name, memDir: &DirMap{}, dir: true} + return &FileData{name: name, memDir: &DirMap{}, dir: true, modtime: time.Now()} } func ChangeFileName(f *FileData, newname string) { diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go index 5c265f92..ea0798d8 100644 --- a/vendor/github.com/spf13/afero/memmap.go +++ b/vendor/github.com/spf13/afero/memmap.go @@ -279,7 +279,7 @@ func (m *MemMapFs) RemoveAll(path string) error { defer m.mu.RUnlock() for p := range m.getData() { - if strings.HasPrefix(p, path) { + if p == path || strings.HasPrefix(p, path+FilePathSeparator) { m.mu.RUnlock() m.mu.Lock() delete(m.getData(), p) diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go index 985363ee..34f99a40 100644 --- a/vendor/github.com/spf13/afero/unionFile.go +++ b/vendor/github.com/spf13/afero/unionFile.go @@ -268,13 +268,7 @@ func (f *UnionFile) WriteString(s string) (n int, err error) { return 0, BADFD } -func copyToLayer(base Fs, layer Fs, name string) error { - bfh, err := base.Open(name) - if err != nil { - return err - } - defer bfh.Close() - +func copyFile(base Fs, layer Fs, name string, bfh File) error { // First make sure the directory exists exists, err := Exists(layer, filepath.Dir(name)) if err != nil { @@ -315,3 +309,23 @@ func copyToLayer(base Fs, layer Fs, name string) error { } return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) } + +func copyToLayer(base Fs, layer Fs, name string) error { + bfh, err := base.Open(name) + if err != nil { + return err + } + defer bfh.Close() + + return copyFile(base, layer, name, bfh) +} + +func copyFileToLayer(base Fs, layer Fs, name string, flag int, perm os.FileMode) error { + bfh, err := base.OpenFile(name, flag, perm) + if err != nil { + return err + } + defer bfh.Close() + + return copyFile(base, layer, name, bfh) +} diff --git a/vendor/github.com/spf13/viper/.golangci.yml b/vendor/github.com/spf13/viper/.golangci.yml index 52e77eef..16e03965 100644 --- a/vendor/github.com/spf13/viper/.golangci.yml +++ b/vendor/github.com/spf13/viper/.golangci.yml @@ -3,7 +3,10 @@ run: linters-settings: gci: - local-prefixes: github.com/spf13/viper + sections: + - standard + - 
default + - prefix(github.com/spf13/viper) golint: min-confidence: 0 goimports: diff --git a/vendor/github.com/spf13/viper/Makefile b/vendor/github.com/spf13/viper/Makefile index 1279096f..02d3e371 100644 --- a/vendor/github.com/spf13/viper/Makefile +++ b/vendor/github.com/spf13/viper/Makefile @@ -15,8 +15,8 @@ TEST_FORMAT = short-verbose endif # Dependency versions -GOTESTSUM_VERSION = 1.7.0 -GOLANGCI_VERSION = 1.43.0 +GOTESTSUM_VERSION = 1.8.0 +GOLANGCI_VERSION = 1.45.2 # Add the ability to override some variables # Use with care diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md index 9712e705..c14e8927 100644 --- a/vendor/github.com/spf13/viper/README.md +++ b/vendor/github.com/spf13/viper/README.md @@ -11,7 +11,7 @@ [![GitHub Workflow Status](https://img.shields.io/github/workflow/status/spf13/viper/CI?style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.14-61CFDD.svg?style=flat-square) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.15-61CFDD.svg?style=flat-square) [![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper) **Go configuration with fangs!** diff --git a/vendor/github.com/spf13/viper/TROUBLESHOOTING.md b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md index 096277af..c4e36c68 100644 --- a/vendor/github.com/spf13/viper/TROUBLESHOOTING.md +++ b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md @@ -21,3 +21,12 @@ The solution is easy: switch to using Go Modules. Please refer to the [wiki](https://github.com/golang/go/wiki/Modules) on how to do that. **tl;dr* `export GO111MODULE=on` + +## Unquoted 'y' and 'n' characters get replaced with _true_ and _false_ when reading a YAML file + +This is a YAML 1.1 feature according to [go-yaml/yaml#740](https://github.com/go-yaml/yaml/issues/740). + +Potential solutions are: + +1. Quoting values resolved as boolean +1. Upgrading to YAML v3 (for the time being this is possible by passing the `viper_yaml3` tag to your build) diff --git a/vendor/github.com/spf13/viper/experimental_logger.go b/vendor/github.com/spf13/viper/experimental_logger.go new file mode 100644 index 00000000..206dad6a --- /dev/null +++ b/vendor/github.com/spf13/viper/experimental_logger.go @@ -0,0 +1,11 @@ +//go:build viper_logger +// +build viper_logger + +package viper + +// WithLogger sets a custom logger. +func WithLogger(l Logger) Option { + return optionFunc(func(v *Viper) { + v.logger = l + }) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/decoder.go b/vendor/github.com/spf13/viper/internal/encoding/decoder.go index 08b1bb66..f472e9ff 100644 --- a/vendor/github.com/spf13/viper/internal/encoding/decoder.go +++ b/vendor/github.com/spf13/viper/internal/encoding/decoder.go @@ -4,10 +4,10 @@ import ( "sync" ) -// Decoder decodes the contents of b into a v representation. +// Decoder decodes the contents of b into v. // It's primarily used for decoding contents of a file into a map[string]interface{}. 
type Decoder interface { - Decode(b []byte, v interface{}) error + Decode(b []byte, v map[string]interface{}) error } const ( @@ -48,7 +48,7 @@ func (e *DecoderRegistry) RegisterDecoder(format string, enc Decoder) error { } // Decode calls the underlying Decoder based on the format. -func (e *DecoderRegistry) Decode(format string, b []byte, v interface{}) error { +func (e *DecoderRegistry) Decode(format string, b []byte, v map[string]interface{}) error { e.mu.RLock() decoder, ok := e.decoders[format] e.mu.RUnlock() diff --git a/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go b/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go new file mode 100644 index 00000000..4485063b --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go @@ -0,0 +1,61 @@ +package dotenv + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/subosito/gotenv" +) + +const keyDelimiter = "_" + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for encoding data containing environment variables +// (commonly called as dotenv format). +type Codec struct{} + +func (Codec) Encode(v map[string]interface{}) ([]byte, error) { + flattened := map[string]interface{}{} + + flattened = flattenAndMergeMap(flattened, v, "", keyDelimiter) + + keys := make([]string, 0, len(flattened)) + + for key := range flattened { + keys = append(keys, key) + } + + sort.Strings(keys) + + var buf bytes.Buffer + + for _, key := range keys { + _, err := buf.WriteString(fmt.Sprintf("%v=%v\n", strings.ToUpper(key), flattened[key])) + if err != nil { + return nil, err + } + } + + return buf.Bytes(), nil +} + +func (Codec) Decode(b []byte, v map[string]interface{}) error { + var buf bytes.Buffer + + _, err := buf.Write(b) + if err != nil { + return err + } + + env, err := gotenv.StrictParse(&buf) + if err != nil { + return err + } + + for key, value := range env { + v[key] = value + } + + return nil +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go b/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go new file mode 100644 index 00000000..ce6e6efa --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go @@ -0,0 +1,41 @@ +package dotenv + +import ( + "strings" + + "github.com/spf13/cast" +) + +// flattenAndMergeMap recursively flattens the given map into a new map +// Code is based on the function with the same name in tha main package. 
+// TODO: move it to a common place +func flattenAndMergeMap(shadow map[string]interface{}, m map[string]interface{}, prefix string, delimiter string) map[string]interface{} { + if shadow != nil && prefix != "" && shadow[prefix] != nil { + // prefix is shadowed => nothing more to flatten + return shadow + } + if shadow == nil { + shadow = make(map[string]interface{}) + } + + var m2 map[string]interface{} + if prefix != "" { + prefix += delimiter + } + for k, val := range m { + fullKey := prefix + k + switch val.(type) { + case map[string]interface{}: + m2 = val.(map[string]interface{}) + case map[interface{}]interface{}: + m2 = cast.ToStringMap(val) + default: + // immediate value + shadow[strings.ToLower(fullKey)] = val + continue + } + // recursively merge to shadow map + shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) + } + return shadow +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/encoder.go b/vendor/github.com/spf13/viper/internal/encoding/encoder.go index 82c7996c..2341bf23 100644 --- a/vendor/github.com/spf13/viper/internal/encoding/encoder.go +++ b/vendor/github.com/spf13/viper/internal/encoding/encoder.go @@ -7,7 +7,7 @@ import ( // Encoder encodes the contents of v into a byte representation. // It's primarily used for encoding a map[string]interface{} into a file format. type Encoder interface { - Encode(v interface{}) ([]byte, error) + Encode(v map[string]interface{}) ([]byte, error) } const ( @@ -47,7 +47,7 @@ func (e *EncoderRegistry) RegisterEncoder(format string, enc Encoder) error { return nil } -func (e *EncoderRegistry) Encode(format string, v interface{}) ([]byte, error) { +func (e *EncoderRegistry) Encode(format string, v map[string]interface{}) ([]byte, error) { e.mu.RLock() encoder, ok := e.encoders[format] e.mu.RUnlock() diff --git a/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go b/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go index f3e4ab12..7fde8e4b 100644 --- a/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go +++ b/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go @@ -12,7 +12,7 @@ import ( // TODO: add printer config to the codec? type Codec struct{} -func (Codec) Encode(v interface{}) ([]byte, error) { +func (Codec) Encode(v map[string]interface{}) ([]byte, error) { b, err := json.Marshal(v) if err != nil { return nil, err @@ -35,6 +35,6 @@ func (Codec) Encode(v interface{}) ([]byte, error) { return buf.Bytes(), nil } -func (Codec) Decode(b []byte, v interface{}) error { - return hcl.Unmarshal(b, v) +func (Codec) Decode(b []byte, v map[string]interface{}) error { + return hcl.Unmarshal(b, &v) } diff --git a/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go b/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go new file mode 100644 index 00000000..9acd87fc --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go @@ -0,0 +1,99 @@ +package ini + +import ( + "bytes" + "sort" + "strings" + + "github.com/spf13/cast" + "gopkg.in/ini.v1" +) + +// LoadOptions contains all customized options used for load data source(s). +// This type is added here for convenience: this way consumers can import a single package called "ini". +type LoadOptions = ini.LoadOptions + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for INI encoding. 
+type Codec struct { + KeyDelimiter string + LoadOptions LoadOptions +} + +func (c Codec) Encode(v map[string]interface{}) ([]byte, error) { + cfg := ini.Empty() + ini.PrettyFormat = false + + flattened := map[string]interface{}{} + + flattened = flattenAndMergeMap(flattened, v, "", c.keyDelimiter()) + + keys := make([]string, 0, len(flattened)) + + for key := range flattened { + keys = append(keys, key) + } + + sort.Strings(keys) + + for _, key := range keys { + sectionName, keyName := "", key + + lastSep := strings.LastIndex(key, ".") + if lastSep != -1 { + sectionName = key[:(lastSep)] + keyName = key[(lastSep + 1):] + } + + // TODO: is this a good idea? + if sectionName == "default" { + sectionName = "" + } + + cfg.Section(sectionName).Key(keyName).SetValue(cast.ToString(flattened[key])) + } + + var buf bytes.Buffer + + _, err := cfg.WriteTo(&buf) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (c Codec) Decode(b []byte, v map[string]interface{}) error { + cfg := ini.Empty(c.LoadOptions) + + err := cfg.Append(b) + if err != nil { + return err + } + + sections := cfg.Sections() + + for i := 0; i < len(sections); i++ { + section := sections[i] + keys := section.Keys() + + for j := 0; j < len(keys); j++ { + key := keys[j] + value := cfg.Section(section.Name()).Key(key.Name()).String() + + deepestMap := deepSearch(v, strings.Split(section.Name(), c.keyDelimiter())) + + // set innermost value + deepestMap[key.Name()] = value + } + } + + return nil +} + +func (c Codec) keyDelimiter() string { + if c.KeyDelimiter == "" { + return "." + } + + return c.KeyDelimiter +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go b/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go new file mode 100644 index 00000000..8329856b --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go @@ -0,0 +1,74 @@ +package ini + +import ( + "strings" + + "github.com/spf13/cast" +) + +// THIS CODE IS COPIED HERE: IT SHOULD NOT BE MODIFIED +// AT SOME POINT IT WILL BE MOVED TO A COMMON PLACE +// deepSearch scans deep maps, following the key indexes listed in the +// sequence "path". +// The last value is expected to be another map, and is returned. +// +// In case intermediate keys do not exist, or map to a non-map value, +// a new map is created and inserted, and the search continues from there: +// the initial map "m" may be modified! +func deepSearch(m map[string]interface{}, path []string) map[string]interface{} { + for _, k := range path { + m2, ok := m[k] + if !ok { + // intermediate key does not exist + // => create it and continue from there + m3 := make(map[string]interface{}) + m[k] = m3 + m = m3 + continue + } + m3, ok := m2.(map[string]interface{}) + if !ok { + // intermediate key is a value + // => replace with a new map + m3 = make(map[string]interface{}) + m[k] = m3 + } + // continue search from here + m = m3 + } + return m +} + +// flattenAndMergeMap recursively flattens the given map into a new map +// Code is based on the function with the same name in tha main package. 
+// TODO: move it to a common place +func flattenAndMergeMap(shadow map[string]interface{}, m map[string]interface{}, prefix string, delimiter string) map[string]interface{} { + if shadow != nil && prefix != "" && shadow[prefix] != nil { + // prefix is shadowed => nothing more to flatten + return shadow + } + if shadow == nil { + shadow = make(map[string]interface{}) + } + + var m2 map[string]interface{} + if prefix != "" { + prefix += delimiter + } + for k, val := range m { + fullKey := prefix + k + switch val.(type) { + case map[string]interface{}: + m2 = val.(map[string]interface{}) + case map[interface{}]interface{}: + m2 = cast.ToStringMap(val) + default: + // immediate value + shadow[strings.ToLower(fullKey)] = val + continue + } + // recursively merge to shadow map + shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) + } + return shadow +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go new file mode 100644 index 00000000..b8a2251c --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go @@ -0,0 +1,86 @@ +package javaproperties + +import ( + "bytes" + "sort" + "strings" + + "github.com/magiconair/properties" + "github.com/spf13/cast" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for Java properties encoding. +type Codec struct { + KeyDelimiter string + + // Store read properties on the object so that we can write back in order with comments. + // This will only be used if the configuration read is a properties file. + // TODO: drop this feature in v2 + // TODO: make use of the global properties object optional + Properties *properties.Properties +} + +func (c *Codec) Encode(v map[string]interface{}) ([]byte, error) { + if c.Properties == nil { + c.Properties = properties.NewProperties() + } + + flattened := map[string]interface{}{} + + flattened = flattenAndMergeMap(flattened, v, "", c.keyDelimiter()) + + keys := make([]string, 0, len(flattened)) + + for key := range flattened { + keys = append(keys, key) + } + + sort.Strings(keys) + + for _, key := range keys { + _, _, err := c.Properties.Set(key, cast.ToString(flattened[key])) + if err != nil { + return nil, err + } + } + + var buf bytes.Buffer + + _, err := c.Properties.WriteComment(&buf, "#", properties.UTF8) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (c *Codec) Decode(b []byte, v map[string]interface{}) error { + var err error + c.Properties, err = properties.Load(b, properties.UTF8) + if err != nil { + return err + } + + for _, key := range c.Properties.Keys() { + // ignore existence check: we know it's there + value, _ := c.Properties.Get(key) + + // recursively build nested maps + path := strings.Split(key, c.keyDelimiter()) + lastKey := strings.ToLower(path[len(path)-1]) + deepestMap := deepSearch(v, path[0:len(path)-1]) + + // set innermost value + deepestMap[lastKey] = value + } + + return nil +} + +func (c Codec) keyDelimiter() string { + if c.KeyDelimiter == "" { + return "." 
+ } + + return c.KeyDelimiter +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go new file mode 100644 index 00000000..93755cac --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go @@ -0,0 +1,74 @@ +package javaproperties + +import ( + "strings" + + "github.com/spf13/cast" +) + +// THIS CODE IS COPIED HERE: IT SHOULD NOT BE MODIFIED +// AT SOME POINT IT WILL BE MOVED TO A COMMON PLACE +// deepSearch scans deep maps, following the key indexes listed in the +// sequence "path". +// The last value is expected to be another map, and is returned. +// +// In case intermediate keys do not exist, or map to a non-map value, +// a new map is created and inserted, and the search continues from there: +// the initial map "m" may be modified! +func deepSearch(m map[string]interface{}, path []string) map[string]interface{} { + for _, k := range path { + m2, ok := m[k] + if !ok { + // intermediate key does not exist + // => create it and continue from there + m3 := make(map[string]interface{}) + m[k] = m3 + m = m3 + continue + } + m3, ok := m2.(map[string]interface{}) + if !ok { + // intermediate key is a value + // => replace with a new map + m3 = make(map[string]interface{}) + m[k] = m3 + } + // continue search from here + m = m3 + } + return m +} + +// flattenAndMergeMap recursively flattens the given map into a new map +// Code is based on the function with the same name in tha main package. +// TODO: move it to a common place +func flattenAndMergeMap(shadow map[string]interface{}, m map[string]interface{}, prefix string, delimiter string) map[string]interface{} { + if shadow != nil && prefix != "" && shadow[prefix] != nil { + // prefix is shadowed => nothing more to flatten + return shadow + } + if shadow == nil { + shadow = make(map[string]interface{}) + } + + var m2 map[string]interface{} + if prefix != "" { + prefix += delimiter + } + for k, val := range m { + fullKey := prefix + k + switch val.(type) { + case map[string]interface{}: + m2 = val.(map[string]interface{}) + case map[interface{}]interface{}: + m2 = cast.ToStringMap(val) + default: + // immediate value + shadow[strings.ToLower(fullKey)] = val + continue + } + // recursively merge to shadow map + shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) + } + return shadow +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/json/codec.go b/vendor/github.com/spf13/viper/internal/encoding/json/codec.go index dff9ec98..1b7caace 100644 --- a/vendor/github.com/spf13/viper/internal/encoding/json/codec.go +++ b/vendor/github.com/spf13/viper/internal/encoding/json/codec.go @@ -7,11 +7,11 @@ import ( // Codec implements the encoding.Encoder and encoding.Decoder interfaces for JSON encoding. type Codec struct{} -func (Codec) Encode(v interface{}) ([]byte, error) { +func (Codec) Encode(v map[string]interface{}) ([]byte, error) { // TODO: expose prefix and indent in the Codec as setting? 
return json.MarshalIndent(v, "", " ") } -func (Codec) Decode(b []byte, v interface{}) error { - return json.Unmarshal(b, v) +func (Codec) Decode(b []byte, v map[string]interface{}) error { + return json.Unmarshal(b, &v) } diff --git a/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go index c043802b..ff1112ca 100644 --- a/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go +++ b/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go @@ -1,3 +1,6 @@ +//go:build !viper_toml2 +// +build !viper_toml2 + package toml import ( @@ -7,39 +10,30 @@ import ( // Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding. type Codec struct{} -func (Codec) Encode(v interface{}) ([]byte, error) { - if m, ok := v.(map[string]interface{}); ok { - t, err := toml.TreeFromMap(m) - if err != nil { - return nil, err - } - - s, err := t.ToTomlString() - if err != nil { - return nil, err - } +func (Codec) Encode(v map[string]interface{}) ([]byte, error) { + t, err := toml.TreeFromMap(v) + if err != nil { + return nil, err + } - return []byte(s), nil + s, err := t.ToTomlString() + if err != nil { + return nil, err } - return toml.Marshal(v) + return []byte(s), nil } -func (Codec) Decode(b []byte, v interface{}) error { +func (Codec) Decode(b []byte, v map[string]interface{}) error { tree, err := toml.LoadBytes(b) if err != nil { return err } - if m, ok := v.(*map[string]interface{}); ok { - vmap := *m - tmap := tree.ToMap() - for k, v := range tmap { - vmap[k] = v - } - - return nil + tmap := tree.ToMap() + for key, value := range tmap { + v[key] = value } - return tree.Unmarshal(v) + return nil } diff --git a/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go b/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go new file mode 100644 index 00000000..566b7062 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go @@ -0,0 +1,19 @@ +//go:build viper_toml2 +// +build viper_toml2 + +package toml + +import ( + "github.com/pelletier/go-toml/v2" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding. +type Codec struct{} + +func (Codec) Encode(v map[string]interface{}) ([]byte, error) { + return toml.Marshal(v) +} + +func (Codec) Decode(b []byte, v map[string]interface{}) error { + return toml.Unmarshal(b, &v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go index f94b2699..24cc19df 100644 --- a/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go +++ b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go @@ -1,14 +1,14 @@ package yaml -import "gopkg.in/yaml.v2" +// import "gopkg.in/yaml.v2" // Codec implements the encoding.Encoder and encoding.Decoder interfaces for YAML encoding. 
type Codec struct{} -func (Codec) Encode(v interface{}) ([]byte, error) { +func (Codec) Encode(v map[string]interface{}) ([]byte, error) { return yaml.Marshal(v) } -func (Codec) Decode(b []byte, v interface{}) error { - return yaml.Unmarshal(b, v) +func (Codec) Decode(b []byte, v map[string]interface{}) error { + return yaml.Unmarshal(b, &v) } diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go new file mode 100644 index 00000000..ca29b84d --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go @@ -0,0 +1,14 @@ +//go:build !viper_yaml3 +// +build !viper_yaml3 + +package yaml + +import yamlv2 "gopkg.in/yaml.v2" + +var yaml = struct { + Marshal func(in interface{}) (out []byte, err error) + Unmarshal func(in []byte, out interface{}) (err error) +}{ + Marshal: yamlv2.Marshal, + Unmarshal: yamlv2.Unmarshal, +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go new file mode 100644 index 00000000..96b8957f --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go @@ -0,0 +1,14 @@ +//go:build viper_yaml3 +// +build viper_yaml3 + +package yaml + +import yamlv3 "gopkg.in/yaml.v3" + +var yaml = struct { + Marshal func(in interface{}) (out []byte, err error) + Unmarshal func(in []byte, out interface{}) (err error) +}{ + Marshal: yamlv3.Marshal, + Unmarshal: yamlv3.Unmarshal, +} diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index 4a993589..4a9dac9d 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -35,16 +35,16 @@ import ( "time" "github.com/fsnotify/fsnotify" - "github.com/magiconair/properties" "github.com/mitchellh/mapstructure" "github.com/spf13/afero" "github.com/spf13/cast" "github.com/spf13/pflag" - "github.com/subosito/gotenv" - "gopkg.in/ini.v1" "github.com/spf13/viper/internal/encoding" + "github.com/spf13/viper/internal/encoding/dotenv" "github.com/spf13/viper/internal/encoding/hcl" + "github.com/spf13/viper/internal/encoding/ini" + "github.com/spf13/viper/internal/encoding/javaproperties" "github.com/spf13/viper/internal/encoding/json" "github.com/spf13/viper/internal/encoding/toml" "github.com/spf13/viper/internal/encoding/yaml" @@ -67,47 +67,8 @@ type RemoteResponse struct { Error error } -var ( - encoderRegistry = encoding.NewEncoderRegistry() - decoderRegistry = encoding.NewDecoderRegistry() -) - func init() { v = New() - - { - codec := yaml.Codec{} - - encoderRegistry.RegisterEncoder("yaml", codec) - decoderRegistry.RegisterDecoder("yaml", codec) - - encoderRegistry.RegisterEncoder("yml", codec) - decoderRegistry.RegisterDecoder("yml", codec) - } - - { - codec := json.Codec{} - - encoderRegistry.RegisterEncoder("json", codec) - decoderRegistry.RegisterDecoder("json", codec) - } - - { - codec := toml.Codec{} - - encoderRegistry.RegisterEncoder("toml", codec) - decoderRegistry.RegisterDecoder("toml", codec) - } - - { - codec := hcl.Codec{} - - encoderRegistry.RegisterEncoder("hcl", codec) - decoderRegistry.RegisterDecoder("hcl", codec) - - encoderRegistry.RegisterEncoder("tfvars", codec) - decoderRegistry.RegisterDecoder("tfvars", codec) - } } type remoteConfigFactory interface { @@ -254,13 +215,13 @@ type Viper struct { aliases map[string]string typeByDefValue bool - // Store read properties on the object so that we can write back in order with comments. 
- // This will only be used if the configuration read is a properties file. - properties *properties.Properties - onConfigChange func(fsnotify.Event) logger Logger + + // TODO: should probably be protected with a mutex + encoderRegistry *encoding.EncoderRegistry + decoderRegistry *encoding.DecoderRegistry } // New returns an initialized Viper instance. @@ -280,6 +241,8 @@ func New() *Viper { v.typeByDefValue = false v.logger = jwwLogger{} + v.resetEncoding() + return v } @@ -326,6 +289,8 @@ func NewWithOptions(opts ...Option) *Viper { opt.apply(v) } + v.resetEncoding() + return v } @@ -338,6 +303,84 @@ func Reset() { SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} } +// TODO: make this lazy initialization instead +func (v *Viper) resetEncoding() { + encoderRegistry := encoding.NewEncoderRegistry() + decoderRegistry := encoding.NewDecoderRegistry() + + { + codec := yaml.Codec{} + + encoderRegistry.RegisterEncoder("yaml", codec) + decoderRegistry.RegisterDecoder("yaml", codec) + + encoderRegistry.RegisterEncoder("yml", codec) + decoderRegistry.RegisterDecoder("yml", codec) + } + + { + codec := json.Codec{} + + encoderRegistry.RegisterEncoder("json", codec) + decoderRegistry.RegisterDecoder("json", codec) + } + + { + codec := toml.Codec{} + + encoderRegistry.RegisterEncoder("toml", codec) + decoderRegistry.RegisterDecoder("toml", codec) + } + + { + codec := hcl.Codec{} + + encoderRegistry.RegisterEncoder("hcl", codec) + decoderRegistry.RegisterDecoder("hcl", codec) + + encoderRegistry.RegisterEncoder("tfvars", codec) + decoderRegistry.RegisterDecoder("tfvars", codec) + } + + { + codec := ini.Codec{ + KeyDelimiter: v.keyDelim, + LoadOptions: v.iniLoadOptions, + } + + encoderRegistry.RegisterEncoder("ini", codec) + decoderRegistry.RegisterDecoder("ini", codec) + } + + { + codec := &javaproperties.Codec{ + KeyDelimiter: v.keyDelim, + } + + encoderRegistry.RegisterEncoder("properties", codec) + decoderRegistry.RegisterDecoder("properties", codec) + + encoderRegistry.RegisterEncoder("props", codec) + decoderRegistry.RegisterDecoder("props", codec) + + encoderRegistry.RegisterEncoder("prop", codec) + decoderRegistry.RegisterDecoder("prop", codec) + } + + { + codec := &dotenv.Codec{} + + encoderRegistry.RegisterEncoder("dotenv", codec) + decoderRegistry.RegisterDecoder("dotenv", codec) + + encoderRegistry.RegisterEncoder("env", codec) + decoderRegistry.RegisterDecoder("env", codec) + } + + v.encoderRegistry = encoderRegistry + v.decoderRegistry = decoderRegistry +} + type defaultRemoteProvider struct { provider string endpoint string @@ -433,7 +476,7 @@ func (v *Viper) WatchConfig() { v.onConfigChange(event) } } else if filepath.Clean(event.Name) == configFile && - event.Op&fsnotify.Remove&fsnotify.Remove != 0 { + event.Op&fsnotify.Remove != 0 { eventsWG.Done() return } @@ -1634,53 +1677,11 @@ func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { buf.ReadFrom(in) switch format := strings.ToLower(v.getConfigType()); format { - case "yaml", "yml", "json", "toml", "hcl", "tfvars": - err := decoderRegistry.Decode(format, buf.Bytes(), &c) - if err != nil { - return ConfigParseError{err} - } - - case "dotenv", "env": - env, err := gotenv.StrictParse(buf) - if err != nil { - return ConfigParseError{err} - } - for k, v := range env { - c[k] = v - } - - case "properties", "props", "prop": - v.properties = properties.NewProperties() - var err error - if v.properties, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil { - return ConfigParseError{err} - 
} - for _, key := range v.properties.Keys() { - value, _ := v.properties.Get(key) - // recursively build nested maps - path := strings.Split(key, ".") - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(c, path[0:len(path)-1]) - // set innermost value - deepestMap[lastKey] = value - } - - case "ini": - cfg := ini.Empty(v.iniLoadOptions) - err := cfg.Append(buf.Bytes()) + case "yaml", "yml", "json", "toml", "hcl", "tfvars", "ini", "properties", "props", "prop", "dotenv", "env": + err := v.decoderRegistry.Decode(format, buf.Bytes(), c) if err != nil { return ConfigParseError{err} } - sections := cfg.Sections() - for i := 0; i < len(sections); i++ { - section := sections[i] - keys := section.Keys() - for j := 0; j < len(keys); j++ { - key := keys[j] - value := cfg.Section(section.Name()).Key(key.Name()).String() - c[section.Name()+"."+key.Name()] = value - } - } } insensitiviseMap(c) @@ -1691,8 +1692,8 @@ func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { func (v *Viper) marshalWriter(f afero.File, configType string) error { c := v.AllSettings() switch configType { - case "yaml", "yml", "json", "toml", "hcl", "tfvars": - b, err := encoderRegistry.Encode(configType, c) + case "yaml", "yml", "json", "toml", "hcl", "tfvars", "ini", "prop", "props", "properties", "dotenv", "env": + b, err := v.encoderRegistry.Encode(configType, c) if err != nil { return ConfigMarshalError{err} } @@ -1701,50 +1702,6 @@ func (v *Viper) marshalWriter(f afero.File, configType string) error { if err != nil { return ConfigMarshalError{err} } - - case "prop", "props", "properties": - if v.properties == nil { - v.properties = properties.NewProperties() - } - p := v.properties - for _, key := range v.AllKeys() { - _, _, err := p.Set(key, v.GetString(key)) - if err != nil { - return ConfigMarshalError{err} - } - } - _, err := p.WriteComment(f, "#", properties.UTF8) - if err != nil { - return ConfigMarshalError{err} - } - - case "dotenv", "env": - lines := []string{} - for _, key := range v.AllKeys() { - envName := strings.ToUpper(strings.Replace(key, ".", "_", -1)) - val := v.Get(key) - lines = append(lines, fmt.Sprintf("%v=%v", envName, val)) - } - s := strings.Join(lines, "\n") - if _, err := f.WriteString(s); err != nil { - return ConfigMarshalError{err} - } - - case "ini": - keys := v.AllKeys() - cfg := ini.Empty() - ini.PrettyFormat = false - for i := 0; i < len(keys); i++ { - key := keys[i] - lastSep := strings.LastIndex(key, ".") - sectionName := key[:(lastSep)] - keyName := key[(lastSep + 1):] - if sectionName == "default" { - sectionName = "" - } - cfg.Section(sectionName).Key(keyName).SetValue(v.GetString(key)) - } - cfg.WriteTo(f) } return nil } @@ -1761,7 +1718,8 @@ func keyExists(k string, m map[string]interface{}) string { } func castToMapStringInterface( - src map[interface{}]interface{}) map[string]interface{} { + src map[interface{}]interface{}, +) map[string]interface{} { tgt := map[string]interface{}{} for k, v := range src { tgt[fmt.Sprintf("%v", k)] = v @@ -1799,7 +1757,8 @@ func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} // deep. Both map types are supported as there is a go-yaml fork that uses // `map[string]interface{}` instead. 
func mergeMaps( - src, tgt map[string]interface{}, itgt map[interface{}]interface{}) { + src, tgt map[string]interface{}, itgt map[interface{}]interface{}, +) { for sk, sv := range src { tk := keyExists(sk, tgt) if tk == "" { @@ -1823,17 +1782,6 @@ func mergeMaps( svType := reflect.TypeOf(sv) tvType := reflect.TypeOf(tv) - if tvType != nil && svType != tvType { // Allow for the target to be nil - v.logger.Error( - "svType != tvType", - "key", sk, - "st", svType, - "tt", tvType, - "sv", sv, - "tv", tv, - ) - continue - } v.logger.Trace( "processing", @@ -1847,13 +1795,27 @@ func mergeMaps( switch ttv := tv.(type) { case map[interface{}]interface{}: v.logger.Trace("merging maps (must convert)") - tsv := sv.(map[interface{}]interface{}) + tsv, ok := sv.(map[interface{}]interface{}) + if !ok { + v.logger.Error( + "Could not cast sv to map[interface{}]interface{}; key=%s, st=%v, tt=%v, sv=%v, tv=%v", + sk, svType, tvType, sv, tv) + continue + } + ssv := castToMapStringInterface(tsv) stv := castToMapStringInterface(ttv) mergeMaps(ssv, stv, ttv) case map[string]interface{}: v.logger.Trace("merging maps") - mergeMaps(sv.(map[string]interface{}), ttv, nil) + tsv, ok := sv.(map[string]interface{}) + if !ok { + v.logger.Error( + "Could not cast sv to map[string]interface{}; key=%s, st=%v, tt=%v, sv=%v, tv=%v", + sk, svType, tvType, sv, tv) + continue + } + mergeMaps(tsv, ttv, nil) default: v.logger.Trace("setting value") tgt[tk] = sv diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 41649d26..3bb22a97 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -3,6 +3,7 @@ package assert import ( "fmt" "reflect" + "time" ) type CompareType int @@ -30,6 +31,8 @@ var ( float64Type = reflect.TypeOf(float64(1)) stringType = reflect.TypeOf("") + + timeType = reflect.TypeOf(time.Time{}) ) func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { @@ -299,6 +302,27 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return compareLess, true } } + // Check for known struct types we can check for compare results. + case reflect.Struct: + { + // All structs enter here. We're not interested in most types. + if !canConvert(obj1Value, timeType) { + break + } + + // time.Time can compared! + timeObj1, ok := obj1.(time.Time) + if !ok { + timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) + } + + timeObj2, ok := obj2.(time.Time) + if !ok { + timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) + } + + return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + } } return compareEqual, false @@ -310,7 +334,10 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { // assert.Greater(t, float64(2), float64(1)) // assert.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) 
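The new reflect.Struct branch in compare converts both operands to time.Time and orders them by UnixNano, so the ordering assertions accept timestamps. A small sketch using the assert package from this diff:

package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestTimeOrdering(t *testing.T) {
	earlier := time.Now()
	later := earlier.Add(time.Second)
	// time.Time now reaches the struct case and is compared via UnixNano
	// instead of being rejected as non-comparable.
	assert.Greater(t, later, earlier)
	assert.LessOrEqual(t, earlier, later)
}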
} // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -320,7 +347,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // assert.GreaterOrEqual(t, "b", "a") // assert.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // Less asserts that the first element is less than the second @@ -329,7 +359,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // assert.Less(t, float64(1), float64(2)) // assert.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -339,7 +372,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // assert.LessOrEqual(t, "a", "b") // assert.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } // Positive asserts that the specified element is positive @@ -347,8 +383,11 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter // assert.Positive(t, 1) // assert.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) } // Negative asserts that the specified element is negative @@ -356,8 +395,11 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { // assert.Negative(t, -1) // assert.Negative(t, -1.23) func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) 
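These hunks also change msgAndArgs to be expanded variadically instead of being passed as a single slice, so printf-style failure messages format as intended. A brief sketch; the test name and values are illustrative:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSignsAndMessages(t *testing.T) {
	assert.Positive(t, 1.23)
	assert.Negative(t, -5)
	// msgAndArgs... is now forwarded element by element, so a failure here
	// would print "limit exceeded: 2 > 1" rather than a dumped slice.
	assert.LessOrEqual(t, 1, 2, "limit exceeded: %d > %d", 2, 1)
}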
} func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go new file mode 100644 index 00000000..df22c47f --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go @@ -0,0 +1,16 @@ +//go:build go1.17 +// +build go1.17 + +// TODO: once support for Go 1.16 is dropped, this file can be +// merged/removed with assertion_compare_go1.17_test.go and +// assertion_compare_legacy.go + +package assert + +import "reflect" + +// Wrapper around reflect.Value.CanConvert, for compatability +// reasons. +func canConvert(value reflect.Value, to reflect.Type) bool { + return value.CanConvert(to) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go new file mode 100644 index 00000000..1701af2a --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go @@ -0,0 +1,16 @@ +//go:build !go1.17 +// +build !go1.17 + +// TODO: once support for Go 1.16 is dropped, this file can be +// merged/removed with assertion_compare_go1.17_test.go and +// assertion_compare_can_convert.go + +package assert + +import "reflect" + +// Older versions of Go does not have the reflect.Value.CanConvert +// method. +func canConvert(value reflect.Value, to reflect.Type) bool { + return false +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 4dfd1229..27e2420e 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -123,6 +123,18 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...) } +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...) +} + // ErrorIsf asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 25337a6f..d9ea368d 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -222,6 +222,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. return ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. 
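The two new files shim reflect.Value.CanConvert behind paired go1.17 build constraints, and ErrorContainsf is the formatted counterpart of the new ErrorContains assertion: the error must be non-nil and its message must contain the substring. A usage sketch; the file name is illustrative:

package example

import (
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestOpenMissingFile(t *testing.T) {
	_, err := os.Open("missing.conf")
	// Passes when err is non-nil and its message contains the substring.
	assert.ErrorContainsf(t, err, "missing.conf", "open failed for %s", "missing.conf")
}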
+// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 1c3b4718..75944878 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) 
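The forwards above expose the new assertion on an assert.Assertions value, and the ordering helpers gain the same variadic message fix. A short sketch; assert.New is the standard constructor and is assumed here:

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestForwardedAssertions(t *testing.T) {
	a := assert.New(t)
	a.IsIncreasing([]int{1, 2, 3})
	a.ErrorContains(errors.New("dial tcp: connection refused"), "connection refused")
}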
} // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index bcac4401..0357b223 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -718,10 +718,14 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (false, false) if impossible. // return (true, false) if element was not found. // return (true, true) if element was found. -func includeElement(list interface{}, element interface{}) (ok, found bool) { +func containsElement(list interface{}, element interface{}) (ok, found bool) { listValue := reflect.ValueOf(list) - listKind := reflect.TypeOf(list).Kind() + listType := reflect.TypeOf(list) + if listType == nil { + return false, false + } + listKind := listType.Kind() defer func() { if e := recover(); e != nil { ok = false @@ -764,7 +768,7 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) } @@ -787,7 +791,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) } @@ -831,7 +835,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) } @@ -852,7 +856,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) h.Helper() } if subset == nil { - return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) + return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) } subsetValue := reflect.ValueOf(subset) @@ -875,7 +879,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) } @@ -1000,27 +1004,21 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { type PanicTestFunc func() // didPanic returns true if the function passed to it panics. Otherwise, it returns false. 
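Besides the rename from includeElement to containsElement, the helper now guards against a nil list, so Contains and friends fail with a message instead of panicking inside reflect. A sketch of the callers it backs; the values are illustrative:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestContainsAndSubset(t *testing.T) {
	assert.Contains(t, []string{"alpha", "beta"}, "alpha")
	assert.NotContains(t, []string{"alpha"}, "gamma")
	assert.Subset(t, []int{1, 2, 3}, []int{2, 3})
	// A nil list now fails with the "could not be applied builtin len()"
	// message instead of panicking before the deferred recover is installed.
}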
-func didPanic(f PanicTestFunc) (bool, interface{}, string) { - - didPanic := false - var message interface{} - var stack string - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - stack = string(debug.Stack()) - } - }() - - // call the target function - f() +func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) { + didPanic = true + defer func() { + message = recover() + if didPanic { + stack = string(debug.Stack()) + } }() - return didPanic, message, stack + // call the target function + f() + didPanic = false + return } // Panics asserts that the code inside the specified PanicTestFunc panics. @@ -1161,11 +1159,15 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs bf, bok := toFloat(actual) if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + return Fail(t, "Parameters must be numerical", msgAndArgs...) + } + + if math.IsNaN(af) && math.IsNaN(bf) { + return true } if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) + return Fail(t, "Expected must not be NaN", msgAndArgs...) } if math.IsNaN(bf) { @@ -1188,7 +1190,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) } actualSlice := reflect.ValueOf(actual) @@ -1250,8 +1252,12 @@ func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, m func calcRelativeError(expected, actual interface{}) (float64, error) { af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + bf, bok := toFloat(actual) + if !aok || !bok { + return 0, fmt.Errorf("Parameters must be numerical") + } + if math.IsNaN(af) && math.IsNaN(bf) { + return 0, nil } if math.IsNaN(af) { return 0, errors.New("expected value must not be NaN") @@ -1259,10 +1265,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) { if af == 0 { return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) - } if math.IsNaN(bf) { return 0, errors.New("actual value must not be NaN") } @@ -1298,7 +1300,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) } actualSlice := reflect.ValueOf(actual) @@ -1375,6 +1377,27 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte return true } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !Error(t, theError, msgAndArgs...) 
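InDelta now treats two NaN operands as equal (as does calcRelativeError), and the rewritten didPanic only clears its flag after the target function returns, so any panic is reported. A minimal sketch:

package example

import (
	"math"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestDeltaAndPanics(t *testing.T) {
	assert.InDelta(t, 3.14159, 3.1416, 0.001)
	// Both operands NaN now short-circuits to success.
	assert.InDelta(t, math.NaN(), math.NaN(), 0.0)
	// The rewritten didPanic also reports panic(nil), since it no longer
	// keys off a nil recover value.
	assert.Panics(t, func() { panic("boom") })
}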
{ + return false + } + + actual := theError.Error() + if !strings.Contains(actual, contains) { + return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...) + } + + return true +} + // matchRegexp return true if a specified regexp matches a string. func matchRegexp(rx interface{}, str interface{}) bool { @@ -1588,12 +1611,17 @@ func diff(expected interface{}, actual interface{}) string { } var e, a string - if et != reflect.TypeOf("") { - e = spewConfig.Sdump(expected) - a = spewConfig.Sdump(actual) - } else { + + switch et { + case reflect.TypeOf(""): e = reflect.ValueOf(expected).String() a = reflect.ValueOf(actual).String() + case reflect.TypeOf(time.Time{}): + e = spewConfigStringerEnabled.Sdump(expected) + a = spewConfigStringerEnabled.Sdump(actual) + default: + e = spewConfig.Sdump(expected) + a = spewConfig.Sdump(actual) } diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ @@ -1625,6 +1653,14 @@ var spewConfig = spew.ConfigState{ MaxDepth: 10, } +var spewConfigStringerEnabled = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, + MaxDepth: 10, +} + type tHelper interface { Helper() } diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 51820df2..59c48277 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -280,6 +280,36 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int t.FailNow() } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorContains(t, theError, contains, msgAndArgs...) { + return + } + t.FailNow() +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorContainsf(t, theError, contains, msg, args...) { + return + } + t.FailNow() +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index ed54a9d8..5bb07c89 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -223,6 +223,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. 
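The require variants wrap the assert implementations and call t.FailNow() on failure, aborting the test immediately. A usage sketch; the parsed string is illustrative:

package example

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestAtoi(t *testing.T) {
	_, err := strconv.Atoi("not-a-number")
	// Aborts the test unless err is non-nil and mentions the substring.
	require.ErrorContains(t, err, "invalid syntax")
}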
+// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) { |
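Finally, the forwarded methods make the same assertions available on a require.Assertions value. A last sketch; require.New is the standard constructor and is assumed here:

package example

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestForwardedRequire(t *testing.T) {
	r := require.New(t)
	err := fmt.Errorf("timeout waiting for response")
	r.ErrorContains(err, "timeout")
	r.ErrorContainsf(err, "timeout", "unexpected error: %v", err)
}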