Diffstat (limited to 'vendor/github.com/mattermost/mattermost-server')
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/einterfaces/LICENSE.txt | 897
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/einterfaces/account_migration.go | 10
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/einterfaces/brand.go | 14
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/einterfaces/cluster.go | 25
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/einterfaces/compliance.go | 13
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/einterfaces/data_retention.go | 12
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/einterfaces/elasticsearch.go | 20
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/einterfaces/emoji.go | 12
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/data_retention.go | 13
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/elasticsearch.go | 17
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/ldap_sync.go | 13
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/message_export.go | 13
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/einterfaces/ldap.go | 25
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/einterfaces/message_export.go | 14
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/einterfaces/metrics.go | 43
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/einterfaces/mfa.go | 15
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/einterfaces/oauthproviders.go | 29
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/einterfaces/saml.go | 15
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/LICENSE.txt | 897
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/access.go | 96
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/analytics_row.go | 41
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/audit.go | 30
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/audits.go | 34
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/authorization.go | 522
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/authorize.go | 141
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/builtin.go | 9
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/bundle_info.go | 23
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/channel.go | 208
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/channel_count.go | 54
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/channel_data.go | 34
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/channel_list.go | 53
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/channel_member.go | 148
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/channel_member_history.go | 15
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/channel_search.go | 26
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/channel_stats.go | 25
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/channel_view.go | 41
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/client.go | 2379
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/client4.go | 3299
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/cluster_discovery.go | 133
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/cluster_info.go | 50
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/cluster_message.go | 46
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/cluster_stats.go | 27
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/command.go | 139
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/command_args.go | 34
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/command_response.go | 61
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/command_webhook.go | 65
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/compliance.go | 119
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/compliance_post.go | 114
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/config.go | 2238
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/data_retention_policy.go | 27
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/emoji.go | 83
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/emoji_search.go | 25
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/file.go | 34
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/file_info.go | 170
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/gitlab.go | 8
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/gitlab/gitlab.go | 114
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/incoming_webhook.go | 206
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/initial_load.go | 30
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/job.go | 118
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/ldap.go | 9
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/license.go | 219
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/manifest.go | 228
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/message_export.go | 19
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/mfa_secret.go | 25
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/oauth.go | 164
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/outgoing_webhook.go | 254
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/plugin_key_value.go | 32
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/plugins_response.go | 30
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/post.go | 492
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/post_list.go | 138
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/preference.go | 113
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/preferences.go | 27
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/push_notification.go | 68
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/push_response.go | 54
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/reaction.go | 76
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/saml.go | 40
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/scheduled_task.go | 110
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/search_params.go | 171
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/security_bulletin.go | 41
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/session.go | 137
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/slack_attachment.go | 59
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/status.go | 60
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/suggest_command.go | 25
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/switch_request.go | 53
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/system.go | 46
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/team.go | 294
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/team_member.go | 94
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/team_search.go | 35
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/team_stats.go | 26
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/token.go | 40
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/user.go | 616
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/user_access_token.go | 65
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/user_access_token_search.go | 35
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/user_autocomplete.go | 61
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/user_search.go | 32
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/utils.go | 486
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/version.go | 148
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/webrtc.go | 39
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/websocket_client.go | 167
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/websocket_message.go | 132
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/model/websocket_request.go | 34
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/LICENSE.txt | 897
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/config.go | 288
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/ConsoleLogWriter_Manual.go | 14
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/FileLogWriter_Manual.go | 57
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/SimpleNetLogServer.go | 42
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/SocketLogWriter_Manual.go | 18
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/XMLConfigurationExample.go | 13
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/filelog.go | 264
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/log4go.go | 484
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/pattlog.go | 130
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/socklog.go | 57
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/termlog.go | 49
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/wrapper.go | 278
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/LICENSE.txt | 897
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/add.go | 113
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/atomic_value.go | 13
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/atomic_value_go13.go | 28
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/bind.go | 143
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/client.go | 27
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/compare.go | 85
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/conn.go | 470
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/control.go | 420
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/debug.go | 24
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/del.go | 84
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/dn.go | 247
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/doc.go | 4
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/error.go | 155
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/filter.go | 469
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/ldap.go | 320
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/modify.go | 170
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/passwdmodify.go | 148
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/search.go | 450
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/LICENSE.txt | 897
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/dce.go | 84
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/doc.go | 8
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/hash.go | 53
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/marshal.go | 83
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/node.go | 117
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/sql.go | 66
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/time.go | 132
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/util.go | 43
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/uuid.go | 201
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/version1.go | 41
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/version4.go | 25
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/bcrypt/LICENSE.txt | 897
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/bcrypt/base64.go | 35
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 295
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/LICENSE.txt | 897
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/block.go | 159
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/cipher.go | 91
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/const.go | 199
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/LICENSE.txt | 897
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/ber.go | 504
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/content_int.go | 25
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/header.go | 29
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/identifier.go | 103
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/length.go | 81
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/util.go | 24
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/LICENSE.txt | 897
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/apic.go | 742
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/decode.go | 685
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/emitterc.go | 1684
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/encode.go | 306
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/parserc.go | 1095
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/readerc.go | 394
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/resolve.go | 208
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/scannerc.go | 2711
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/sorter.go | 104
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/writerc.go | 89
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yaml.go | 357
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yamlh.go | 716
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173
173 files changed, 41115 insertions, 0 deletions
diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/einterfaces/LICENSE.txt
new file mode 100644
index 00000000..ead98cf0
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/LICENSE.txt
@@ -0,0 +1,897 @@
+Mattermost Licensing
+
+SOFTWARE LICENSING
+
+You are licensed to use compiled versions of the Mattermost platform produced by Mattermost, Inc. under an MIT LICENSE
+
+- See MIT-COMPILED-LICENSE.md included in compiled versions for details
+
+You may be licensed to use source code to create compiled versions not produced by Mattermost, Inc. in one of two ways:
+
+1. Under the Free Software Foundation’s GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or
+2. Under a commercial license available from Mattermost, Inc. by contacting commercial@mattermost.com
+
+You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/, model/,
+webapp/client, webapp/fonts, webapp/i18n, webapp/images and all subdirectories thereof) under the Apache License v2.0.
+
+We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not
+link to the Mattermost Platform directly, but exclusively uses the Mattermost Admin Tools and Configuration Files, and
+(b) you have not modified, added to or adapted the source code of Mattermost in a way that results in the creation of
+a “modified version” or “work based on” Mattermost as these terms are defined in the AGPL v3.0 license.
+
+MATTERMOST TRADEMARK GUIDELINES
+
+Your use of the mark Mattermost is subject to Mattermost, Inc's prior written approval and our organization’s Trademark
+Standards of Use at http://www.mattermost.org/trademark-standards-of-use/. For trademark approval or any questions
+you have about using these trademarks, please email trademark@mattermost.com
+
+------------------------------------------------------------------------------------------------------------------------------
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+------------------------------------------------------------------------------
+
+The software is released under the terms of the GNU Affero General Public
+License, version 3.
+
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/account_migration.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/account_migration.go
new file mode 100644
index 00000000..0db516d7
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/account_migration.go
@@ -0,0 +1,10 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package einterfaces
+
+import "github.com/mattermost/mattermost-server/model"
+
+type AccountMigrationInterface interface {
+ MigrateToLdap(fromAuthService string, forignUserFieldNameToMatch string, force bool) *model.AppError
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/brand.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/brand.go
new file mode 100644
index 00000000..fc584a91
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/brand.go
@@ -0,0 +1,14 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package einterfaces
+
+import (
+ "github.com/mattermost/mattermost-server/model"
+ "mime/multipart"
+)
+
+type BrandInterface interface {
+ SaveBrandImage(*multipart.FileHeader) *model.AppError
+ GetBrandImage() ([]byte, *model.AppError)
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/cluster.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/cluster.go
new file mode 100644
index 00000000..b5ef4772
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/cluster.go
@@ -0,0 +1,25 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package einterfaces
+
+import (
+ "github.com/mattermost/mattermost-server/model"
+)
+
+type ClusterMessageHandler func(msg *model.ClusterMessage)
+
+type ClusterInterface interface {
+ StartInterNodeCommunication()
+ StopInterNodeCommunication()
+ RegisterClusterMessageHandler(event string, crm ClusterMessageHandler)
+ GetClusterId() string
+ IsLeader() bool
+ GetMyClusterInfo() *model.ClusterInfo
+ GetClusterInfos() []*model.ClusterInfo
+ SendClusterMessage(cluster *model.ClusterMessage)
+ NotifyMsg(buf []byte)
+ GetClusterStats() ([]*model.ClusterStats, *model.AppError)
+ GetLogs(page, perPage int) ([]string, *model.AppError)
+ ConfigChanged(previousConfig *model.Config, newConfig *model.Config, sendToOtherServer bool) *model.AppError
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/compliance.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/compliance.go
new file mode 100644
index 00000000..14927bee
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/compliance.go
@@ -0,0 +1,13 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package einterfaces
+
+import (
+ "github.com/mattermost/mattermost-server/model"
+)
+
+type ComplianceInterface interface {
+ StartComplianceDailyJob()
+ RunComplianceJob(job *model.Compliance) *model.AppError
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/data_retention.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/data_retention.go
new file mode 100644
index 00000000..07f7d387
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/data_retention.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package einterfaces
+
+import (
+ "github.com/mattermost/mattermost-server/model"
+)
+
+type DataRetentionInterface interface {
+ GetPolicy() (*model.DataRetentionPolicy, *model.AppError)
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/elasticsearch.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/elasticsearch.go
new file mode 100644
index 00000000..5582fd4e
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/elasticsearch.go
@@ -0,0 +1,20 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package einterfaces
+
+import (
+ "time"
+
+ "github.com/mattermost/mattermost-server/model"
+)
+
+type ElasticsearchInterface interface {
+ Start() *model.AppError
+ IndexPost(post *model.Post, teamId string) *model.AppError
+ SearchPosts(channels *model.ChannelList, searchParams []*model.SearchParams) ([]string, *model.AppError)
+ DeletePost(post *model.Post) *model.AppError
+ TestConfig(cfg *model.Config) *model.AppError
+ PurgeIndexes() *model.AppError
+ DataRetentionDeleteIndexes(cutoff time.Time) *model.AppError
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/emoji.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/emoji.go
new file mode 100644
index 00000000..b8d61e74
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/emoji.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package einterfaces
+
+import (
+ "github.com/mattermost/mattermost-server/model"
+)
+
+type EmojiInterface interface {
+ CanUserCreateEmoji(string, []*model.TeamMember) bool
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/data_retention.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/data_retention.go
new file mode 100644
index 00000000..73f78e4f
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/data_retention.go
@@ -0,0 +1,13 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package jobs
+
+import (
+ "github.com/mattermost/mattermost-server/model"
+)
+
+type DataRetentionJobInterface interface {
+ MakeWorker() model.Worker
+ MakeScheduler() model.Scheduler
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/elasticsearch.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/elasticsearch.go
new file mode 100644
index 00000000..16e0d769
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/elasticsearch.go
@@ -0,0 +1,17 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package jobs
+
+import (
+ "github.com/mattermost/mattermost-server/model"
+)
+
+type ElasticsearchIndexerInterface interface {
+ MakeWorker() model.Worker
+}
+
+type ElasticsearchAggregatorInterface interface {
+ MakeWorker() model.Worker
+ MakeScheduler() model.Scheduler
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/ldap_sync.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/ldap_sync.go
new file mode 100644
index 00000000..5565afe4
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/ldap_sync.go
@@ -0,0 +1,13 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package jobs
+
+import (
+ "github.com/mattermost/mattermost-server/model"
+)
+
+type LdapSyncInterface interface {
+ MakeWorker() model.Worker
+ MakeScheduler() model.Scheduler
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/message_export.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/message_export.go
new file mode 100644
index 00000000..74b0df75
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/message_export.go
@@ -0,0 +1,13 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package jobs
+
+import (
+ "github.com/mattermost/mattermost-server/model"
+)
+
+type MessageExportJobInterface interface {
+ MakeWorker() model.Worker
+ MakeScheduler() model.Scheduler
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/ldap.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/ldap.go
new file mode 100644
index 00000000..26326b17
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/ldap.go
@@ -0,0 +1,25 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package einterfaces
+
+import (
+ "github.com/go-ldap/ldap"
+
+ "github.com/mattermost/mattermost-server/model"
+)
+
+type LdapInterface interface {
+ DoLogin(id string, password string) (*model.User, *model.AppError)
+ GetUser(id string) (*model.User, *model.AppError)
+ GetUserAttributes(id string, attributes []string) (map[string]string, *model.AppError)
+ CheckPassword(id string, password string) *model.AppError
+ SwitchToLdap(userId, ldapId, ldapPassword string) *model.AppError
+ ValidateFilter(filter string) *model.AppError
+ StartSynchronizeJob(waitForJobToFinish bool) (*model.Job, *model.AppError)
+ RunTest() *model.AppError
+ GetAllLdapUsers() ([]*model.User, *model.AppError)
+ UserFromLdapUser(ldapUser *ldap.Entry) *model.User
+ UserHasUpdateFromLdap(existingUser *model.User, currentLdapUser *model.User) bool
+ UpdateLocalLdapUser(existingUser *model.User, currentLdapUser *model.User) *model.User
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/message_export.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/message_export.go
new file mode 100644
index 00000000..ba498cdf
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/message_export.go
@@ -0,0 +1,14 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package einterfaces
+
+import (
+ "context"
+
+ "github.com/mattermost/mattermost-server/model"
+)
+
+type MessageExportInterface interface {
+ StartSynchronizeJob(ctx context.Context, exportFromTimestamp int64) (*model.Job, *model.AppError)
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/metrics.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/metrics.go
new file mode 100644
index 00000000..a88fe63c
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/metrics.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package einterfaces
+
+type MetricsInterface interface {
+ StartServer()
+ StopServer()
+
+ IncrementPostCreate()
+ IncrementWebhookPost()
+ IncrementPostSentEmail()
+ IncrementPostSentPush()
+ IncrementPostBroadcast()
+ IncrementPostFileAttachment(count int)
+
+ IncrementHttpRequest()
+ IncrementHttpError()
+ ObserveHttpRequestDuration(elapsed float64)
+
+ IncrementClusterRequest()
+ ObserveClusterRequestDuration(elapsed float64)
+
+ IncrementLogin()
+ IncrementLoginFail()
+
+ IncrementEtagHitCounter(route string)
+ IncrementEtagMissCounter(route string)
+
+ IncrementMemCacheHitCounter(cacheName string)
+ IncrementMemCacheMissCounter(cacheName string)
+ IncrementMemCacheMissCounterSession()
+ IncrementMemCacheHitCounterSession()
+
+ IncrementWebsocketEvent(eventType string)
+ IncrementWebSocketBroadcast(eventType string)
+
+ AddMemCacheHitCounter(cacheName string, amount float64)
+ AddMemCacheMissCounter(cacheName string, amount float64)
+
+ IncrementPostsSearchCounter()
+ ObservePostsSearchDuration(elapsed float64)
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/mfa.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/mfa.go
new file mode 100644
index 00000000..3afe961e
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/mfa.go
@@ -0,0 +1,15 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package einterfaces
+
+import (
+ "github.com/mattermost/mattermost-server/model"
+)
+
+type MfaInterface interface {
+ GenerateSecret(user *model.User) (string, []byte, *model.AppError)
+ Activate(user *model.User, token string) *model.AppError
+ Deactivate(userId string) *model.AppError
+ ValidateToken(secret, token string) (bool, *model.AppError)
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/oauthproviders.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/oauthproviders.go
new file mode 100644
index 00000000..7e24d2a7
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/oauthproviders.go
@@ -0,0 +1,29 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package einterfaces
+
+import (
+ "github.com/mattermost/mattermost-server/model"
+ "io"
+)
+
+type OauthProvider interface {
+ GetIdentifier() string
+ GetUserFromJson(data io.Reader) *model.User
+ GetAuthDataFromJson(data io.Reader) string
+}
+
+var oauthProviders = make(map[string]OauthProvider)
+
+func RegisterOauthProvider(name string, newProvider OauthProvider) {
+ oauthProviders[name] = newProvider
+}
+
+func GetOauthProvider(name string) OauthProvider {
+ provider, ok := oauthProviders[name]
+ if ok {
+ return provider
+ }
+ return nil
+}
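
The oauthproviders.go hunk above is the one file in this set that carries behavior rather than a bare interface: it defines a small registry in which a provider implements OauthProvider and calls RegisterOauthProvider, and the server later resolves the provider by name with GetOauthProvider (returning nil when nothing is registered under that name). As a rough sketch only, assuming a provider package that can import the vendored einterfaces and model packages: the "example" provider name, the ExampleProvider type, and the JSON fields below are hypothetical and are not part of the vendored code.

    // Package exampleoauth is a hypothetical OAuth provider that registers
    // itself with the einterfaces registry shown in the diff above.
    package exampleoauth

    import (
        "encoding/json"
        "io"
        "strconv"

        "github.com/mattermost/mattermost-server/einterfaces"
        "github.com/mattermost/mattermost-server/model"
    )

    // ProviderName is a hypothetical service name used only for illustration.
    const ProviderName = "example"

    type ExampleProvider struct{}

    // exampleUser mirrors the fields this hypothetical service would return.
    type exampleUser struct {
        Id       int64  `json:"id"`
        Username string `json:"username"`
        Email    string `json:"email"`
    }

    func init() {
        // Self-registration: importing this package makes the provider
        // discoverable via einterfaces.GetOauthProvider(ProviderName).
        einterfaces.RegisterOauthProvider(ProviderName, &ExampleProvider{})
    }

    func (p *ExampleProvider) GetIdentifier() string {
        return ProviderName
    }

    // GetUserFromJson decodes the service's user payload into a Mattermost
    // user; it returns nil if the payload cannot be decoded.
    func (p *ExampleProvider) GetUserFromJson(data io.Reader) *model.User {
        var eu exampleUser
        if err := json.NewDecoder(data).Decode(&eu); err != nil {
            return nil
        }
        return &model.User{
            Username:    eu.Username,
            Email:       eu.Email,
            AuthService: ProviderName,
        }
    }

    // GetAuthDataFromJson extracts the stable external id used as AuthData;
    // it returns an empty string if the payload cannot be decoded.
    func (p *ExampleProvider) GetAuthDataFromJson(data io.Reader) string {
        var eu exampleUser
        if err := json.NewDecoder(data).Decode(&eu); err != nil {
            return ""
        }
        return strconv.FormatInt(eu.Id, 10)
    }

On the consuming side, code would then call einterfaces.GetOauthProvider("example") and nil-check the result before using it, which matches the lookup semantics shown in the hunk.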
diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/saml.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/saml.go
new file mode 100644
index 00000000..833a3d43
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/saml.go
@@ -0,0 +1,15 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package einterfaces
+
+import (
+ "github.com/mattermost/mattermost-server/model"
+)
+
+type SamlInterface interface {
+ ConfigureSP() *model.AppError
+ BuildRequest(relayState string) (*model.SamlAuthRequest, *model.AppError)
+ DoLogin(encodedXML string, relayState map[string]string) (*model.User, *model.AppError)
+ GetMetadata() (string, *model.AppError)
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/model/LICENSE.txt
new file mode 100644
index 00000000..ead98cf0
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/LICENSE.txt
@@ -0,0 +1,897 @@
+Mattermost Licensing
+
+SOFTWARE LICENSING
+
+You are licensed to use compiled versions of the Mattermost platform produced by Mattermost, Inc. under an MIT LICENSE
+
+- See MIT-COMPILED-LICENSE.md included in compiled versions for details
+
+You may be licensed to use source code to create compiled versions not produced by Mattermost, Inc. in one of two ways:
+
+1. Under the Free Software Foundation’s GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or
+2. Under a commercial license available from Mattermost, Inc. by contacting commercial@mattermost.com
+
+You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/, model/,
+webapp/client, webapp/fonts, webapp/i18n, webapp/images and all subdirectories thereof) under the Apache License v2.0.
+
+We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not
+link to the Mattermost Platform directly, but exclusively uses the Mattermost Admin Tools and Configuration Files, and
+(b) you have not modified, added to or adapted the source code of Mattermost in a way that results in the creation of
+a “modified version” or “work based on” Mattermost as these terms are defined in the AGPL v3.0 license.
+
+MATTERMOST TRADEMARK GUIDELINES
+
+Your use of the mark Mattermost is subject to Mattermost, Inc's prior written approval and our organization’s Trademark
+Standards of Use at http://www.mattermost.org/trademark-standards-of-use/. For trademark approval or any questions
+you have about using these trademarks, please email trademark@mattermost.com
+
+------------------------------------------------------------------------------------------------------------------------------
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+------------------------------------------------------------------------------
+
+The software is released under the terms of the GNU Affero General Public
+License, version 3.
+
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/vendor/github.com/mattermost/mattermost-server/model/access.go b/vendor/github.com/mattermost/mattermost-server/model/access.go
new file mode 100644
index 00000000..e9603c78
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/access.go
@@ -0,0 +1,96 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+)
+
+const (
+ ACCESS_TOKEN_GRANT_TYPE = "authorization_code"
+ ACCESS_TOKEN_TYPE = "bearer"
+ REFRESH_TOKEN_GRANT_TYPE = "refresh_token"
+)
+
+type AccessData struct {
+ ClientId string `json:"client_id"`
+ UserId string `json:"user_id"`
+ Token string `json:"token"`
+ RefreshToken string `json:"refresh_token"`
+ RedirectUri string `json:"redirect_uri"`
+ ExpiresAt int64 `json:"expires_at"`
+ Scope string `json:"scope"`
+}
+
+type AccessResponse struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ ExpiresIn int32 `json:"expires_in"`
+ Scope string `json:"scope"`
+ RefreshToken string `json:"refresh_token"`
+}
+
+// IsValid validates the AccessData and returns an error if it isn't configured
+// correctly.
+func (ad *AccessData) IsValid() *AppError {
+
+ if len(ad.ClientId) == 0 || len(ad.ClientId) > 26 {
+ return NewAppError("AccessData.IsValid", "model.access.is_valid.client_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(ad.UserId) == 0 || len(ad.UserId) > 26 {
+ return NewAppError("AccessData.IsValid", "model.access.is_valid.user_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(ad.Token) != 26 {
+ return NewAppError("AccessData.IsValid", "model.access.is_valid.access_token.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(ad.RefreshToken) > 26 {
+ return NewAppError("AccessData.IsValid", "model.access.is_valid.refresh_token.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(ad.RedirectUri) == 0 || len(ad.RedirectUri) > 256 || !IsValidHttpUrl(ad.RedirectUri) {
+ return NewAppError("AccessData.IsValid", "model.access.is_valid.redirect_uri.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (me *AccessData) IsExpired() bool {
+
+ if me.ExpiresAt <= 0 {
+ return false
+ }
+
+ if GetMillis() > me.ExpiresAt {
+ return true
+ }
+
+ return false
+}
+
+func (ad *AccessData) ToJson() string {
+ b, _ := json.Marshal(ad)
+ return string(b)
+}
+
+func AccessDataFromJson(data io.Reader) *AccessData {
+ var ad *AccessData
+ json.NewDecoder(data).Decode(&ad)
+ return ad
+}
+
+func (ar *AccessResponse) ToJson() string {
+ b, _ := json.Marshal(ar)
+ return string(b)
+}
+
+func AccessResponseFromJson(data io.Reader) *AccessResponse {
+ var ar *AccessResponse
+ json.NewDecoder(data).Decode(&ar)
+ return ar
+}
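
Taken together, these helpers form the usual decode-then-check flow for OAuth access data: build or decode the record, validate it, and refuse it once expired. A minimal illustrative sketch of that flow, assuming the vendored model package import path; the redirect URI is a placeholder:

    package example

    import (
        "fmt"

        "github.com/mattermost/mattermost-server/model"
    )

    // checkAccessData builds an AccessData record, validates it, and prints it
    // as JSON while it is still usable.
    func checkAccessData() {
        ad := &model.AccessData{
            ClientId:    model.NewId(), // NewId yields a 26-character id, matching the length checks above
            UserId:      model.NewId(),
            Token:       model.NewId(),
            RedirectUri: "https://example.com/oauth/complete", // placeholder URI
            ExpiresAt:   model.GetMillis() + 60*60*1000, // one hour from now, in milliseconds
        }

        if appErr := ad.IsValid(); appErr != nil {
            fmt.Println("access data failed validation")
            return
        }
        if ad.IsExpired() {
            fmt.Println("access token has expired")
            return
        }
        fmt.Println(ad.ToJson())
    }
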
diff --git a/vendor/github.com/mattermost/mattermost-server/model/analytics_row.go b/vendor/github.com/mattermost/mattermost-server/model/analytics_row.go
new file mode 100644
index 00000000..4615bb79
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/analytics_row.go
@@ -0,0 +1,41 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type AnalyticsRow struct {
+ Name string `json:"name"`
+ Value float64 `json:"value"`
+}
+
+type AnalyticsRows []*AnalyticsRow
+
+func (me *AnalyticsRow) ToJson() string {
+ b, _ := json.Marshal(me)
+ return string(b)
+}
+
+func AnalyticsRowFromJson(data io.Reader) *AnalyticsRow {
+ var me *AnalyticsRow
+ json.NewDecoder(data).Decode(&me)
+ return me
+}
+
+func (me AnalyticsRows) ToJson() string {
+ if b, err := json.Marshal(me); err != nil {
+ return "[]"
+ } else {
+ return string(b)
+ }
+}
+
+func AnalyticsRowsFromJson(data io.Reader) AnalyticsRows {
+ var me AnalyticsRows
+ json.NewDecoder(data).Decode(&me)
+ return me
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/audit.go b/vendor/github.com/mattermost/mattermost-server/model/audit.go
new file mode 100644
index 00000000..e3d1bdf9
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/audit.go
@@ -0,0 +1,30 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type Audit struct {
+ Id string `json:"id"`
+ CreateAt int64 `json:"create_at"`
+ UserId string `json:"user_id"`
+ Action string `json:"action"`
+ ExtraInfo string `json:"extra_info"`
+ IpAddress string `json:"ip_address"`
+ SessionId string `json:"session_id"`
+}
+
+func (o *Audit) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func AuditFromJson(data io.Reader) *Audit {
+ var o *Audit
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/audits.go b/vendor/github.com/mattermost/mattermost-server/model/audits.go
new file mode 100644
index 00000000..3673eb61
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/audits.go
@@ -0,0 +1,34 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type Audits []Audit
+
+func (o Audits) Etag() string {
+ if len(o) > 0 {
+ // the first in the list is always the most current
+ return Etag(o[0].CreateAt)
+ } else {
+ return ""
+ }
+}
+
+func (o Audits) ToJson() string {
+ if b, err := json.Marshal(o); err != nil {
+ return "[]"
+ } else {
+ return string(b)
+ }
+}
+
+func AuditsFromJson(data io.Reader) Audits {
+ var o Audits
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/authorization.go b/vendor/github.com/mattermost/mattermost-server/model/authorization.go
new file mode 100644
index 00000000..9f4e36ea
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/authorization.go
@@ -0,0 +1,522 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+type Permission struct {
+ Id string `json:"id"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+}
+
+type Role struct {
+ Id string `json:"id"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Permissions []string `json:"permissions"`
+}
+
+var PERMISSION_INVITE_USER *Permission
+var PERMISSION_ADD_USER_TO_TEAM *Permission
+var PERMISSION_USE_SLASH_COMMANDS *Permission
+var PERMISSION_MANAGE_SLASH_COMMANDS *Permission
+var PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS *Permission
+var PERMISSION_CREATE_PUBLIC_CHANNEL *Permission
+var PERMISSION_CREATE_PRIVATE_CHANNEL *Permission
+var PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS *Permission
+var PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS *Permission
+var PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE *Permission
+var PERMISSION_MANAGE_ROLES *Permission
+var PERMISSION_MANAGE_TEAM_ROLES *Permission
+var PERMISSION_MANAGE_CHANNEL_ROLES *Permission
+var PERMISSION_CREATE_DIRECT_CHANNEL *Permission
+var PERMISSION_CREATE_GROUP_CHANNEL *Permission
+var PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES *Permission
+var PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES *Permission
+var PERMISSION_LIST_TEAM_CHANNELS *Permission
+var PERMISSION_JOIN_PUBLIC_CHANNELS *Permission
+var PERMISSION_DELETE_PUBLIC_CHANNEL *Permission
+var PERMISSION_DELETE_PRIVATE_CHANNEL *Permission
+var PERMISSION_EDIT_OTHER_USERS *Permission
+var PERMISSION_READ_CHANNEL *Permission
+var PERMISSION_READ_PUBLIC_CHANNEL *Permission
+var PERMISSION_PERMANENT_DELETE_USER *Permission
+var PERMISSION_UPLOAD_FILE *Permission
+var PERMISSION_GET_PUBLIC_LINK *Permission
+var PERMISSION_MANAGE_WEBHOOKS *Permission
+var PERMISSION_MANAGE_OTHERS_WEBHOOKS *Permission
+var PERMISSION_MANAGE_OAUTH *Permission
+var PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH *Permission
+var PERMISSION_CREATE_POST *Permission
+var PERMISSION_CREATE_POST_PUBLIC *Permission
+var PERMISSION_EDIT_POST *Permission
+var PERMISSION_EDIT_OTHERS_POSTS *Permission
+var PERMISSION_DELETE_POST *Permission
+var PERMISSION_DELETE_OTHERS_POSTS *Permission
+var PERMISSION_REMOVE_USER_FROM_TEAM *Permission
+var PERMISSION_CREATE_TEAM *Permission
+var PERMISSION_MANAGE_TEAM *Permission
+var PERMISSION_IMPORT_TEAM *Permission
+var PERMISSION_VIEW_TEAM *Permission
+var PERMISSION_LIST_USERS_WITHOUT_TEAM *Permission
+var PERMISSION_MANAGE_JOBS *Permission
+var PERMISSION_CREATE_USER_ACCESS_TOKEN *Permission
+var PERMISSION_READ_USER_ACCESS_TOKEN *Permission
+var PERMISSION_REVOKE_USER_ACCESS_TOKEN *Permission
+
+// General permission that encompasses all system admin functions.
+// In the future this could be broken up to allow access to some
+// admin functions but not others.
+var PERMISSION_MANAGE_SYSTEM *Permission
+
+const (
+ SYSTEM_USER_ROLE_ID = "system_user"
+ SYSTEM_ADMIN_ROLE_ID = "system_admin"
+ SYSTEM_POST_ALL_ROLE_ID = "system_post_all"
+ SYSTEM_POST_ALL_PUBLIC_ROLE_ID = "system_post_all_public"
+ SYSTEM_USER_ACCESS_TOKEN_ROLE_ID = "system_user_access_token"
+
+ TEAM_USER_ROLE_ID = "team_user"
+ TEAM_ADMIN_ROLE_ID = "team_admin"
+ TEAM_POST_ALL_ROLE_ID = "team_post_all"
+ TEAM_POST_ALL_PUBLIC_ROLE_ID = "team_post_all_public"
+
+ CHANNEL_USER_ROLE_ID = "channel_user"
+ CHANNEL_ADMIN_ROLE_ID = "channel_admin"
+ CHANNEL_GUEST_ROLE_ID = "guest"
+)
+
+func initializePermissions() {
+ PERMISSION_INVITE_USER = &Permission{
+ "invite_user",
+ "authentication.permissions.team_invite_user.name",
+ "authentication.permissions.team_invite_user.description",
+ }
+ PERMISSION_ADD_USER_TO_TEAM = &Permission{
+ "add_user_to_team",
+ "authentication.permissions.add_user_to_team.name",
+ "authentication.permissions.add_user_to_team.description",
+ }
+ PERMISSION_USE_SLASH_COMMANDS = &Permission{
+ "use_slash_commands",
+ "authentication.permissions.team_use_slash_commands.name",
+ "authentication.permissions.team_use_slash_commands.description",
+ }
+ PERMISSION_MANAGE_SLASH_COMMANDS = &Permission{
+ "manage_slash_commands",
+ "authentication.permissions.manage_slash_commands.name",
+ "authentication.permissions.manage_slash_commands.description",
+ }
+ PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS = &Permission{
+ "manage_others_slash_commands",
+ "authentication.permissions.manage_others_slash_commands.name",
+ "authentication.permissions.manage_others_slash_commands.description",
+ }
+ PERMISSION_CREATE_PUBLIC_CHANNEL = &Permission{
+ "create_public_channel",
+ "authentication.permissions.create_public_channel.name",
+ "authentication.permissions.create_public_channel.description",
+ }
+ PERMISSION_CREATE_PRIVATE_CHANNEL = &Permission{
+ "create_private_channel",
+ "authentication.permissions.create_private_channel.name",
+ "authentication.permissions.create_private_channel.description",
+ }
+ PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS = &Permission{
+ "manage_public_channel_members",
+ "authentication.permissions.manage_public_channel_members.name",
+ "authentication.permissions.manage_public_channel_members.description",
+ }
+ PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS = &Permission{
+ "manage_private_channel_members",
+ "authentication.permissions.manage_private_channel_members.name",
+ "authentication.permissions.manage_private_channel_members.description",
+ }
+ PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE = &Permission{
+ "assign_system_admin_role",
+ "authentication.permissions.assign_system_admin_role.name",
+ "authentication.permissions.assign_system_admin_role.description",
+ }
+ PERMISSION_MANAGE_ROLES = &Permission{
+ "manage_roles",
+ "authentication.permissions.manage_roles.name",
+ "authentication.permissions.manage_roles.description",
+ }
+ PERMISSION_MANAGE_TEAM_ROLES = &Permission{
+ "manage_team_roles",
+ "authentication.permissions.manage_team_roles.name",
+ "authentication.permissions.manage_team_roles.description",
+ }
+ PERMISSION_MANAGE_CHANNEL_ROLES = &Permission{
+ "manage_channel_roles",
+ "authentication.permissions.manage_channel_roles.name",
+ "authentication.permissions.manage_channel_roles.description",
+ }
+ PERMISSION_MANAGE_SYSTEM = &Permission{
+ "manage_system",
+ "authentication.permissions.manage_system.name",
+ "authentication.permissions.manage_system.description",
+ }
+ PERMISSION_CREATE_DIRECT_CHANNEL = &Permission{
+ "create_direct_channel",
+ "authentication.permissions.create_direct_channel.name",
+ "authentication.permissions.create_direct_channel.description",
+ }
+ PERMISSION_CREATE_GROUP_CHANNEL = &Permission{
+ "create_group_channel",
+ "authentication.permissions.create_group_channel.name",
+ "authentication.permissions.create_group_channel.description",
+ }
+ PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES = &Permission{
+ "manage__publicchannel_properties",
+ "authentication.permissions.manage_public_channel_properties.name",
+ "authentication.permissions.manage_public_channel_properties.description",
+ }
+ PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES = &Permission{
+ "manage_private_channel_properties",
+ "authentication.permissions.manage_private_channel_properties.name",
+ "authentication.permissions.manage_private_channel_properties.description",
+ }
+ PERMISSION_LIST_TEAM_CHANNELS = &Permission{
+ "list_team_channels",
+ "authentication.permissions.list_team_channels.name",
+ "authentication.permissions.list_team_channels.description",
+ }
+ PERMISSION_JOIN_PUBLIC_CHANNELS = &Permission{
+ "join_public_channels",
+ "authentication.permissions.join_public_channels.name",
+ "authentication.permissions.join_public_channels.description",
+ }
+ PERMISSION_DELETE_PUBLIC_CHANNEL = &Permission{
+ "delete_public_channel",
+ "authentication.permissions.delete_public_channel.name",
+ "authentication.permissions.delete_public_channel.description",
+ }
+ PERMISSION_DELETE_PRIVATE_CHANNEL = &Permission{
+ "delete_private_channel",
+ "authentication.permissions.delete_private_channel.name",
+ "authentication.permissions.delete_private_channel.description",
+ }
+ PERMISSION_EDIT_OTHER_USERS = &Permission{
+ "edit_other_users",
+ "authentication.permissions.edit_other_users.name",
+ "authentication.permissions.edit_other_users.description",
+ }
+ PERMISSION_READ_CHANNEL = &Permission{
+ "read_channel",
+ "authentication.permissions.read_channel.name",
+ "authentication.permissions.read_channel.description",
+ }
+ PERMISSION_READ_PUBLIC_CHANNEL = &Permission{
+ "read_public_channel",
+ "authentication.permissions.read_public_channel.name",
+ "authentication.permissions.read_public_channel.description",
+ }
+ PERMISSION_PERMANENT_DELETE_USER = &Permission{
+ "permanent_delete_user",
+ "authentication.permissions.permanent_delete_user.name",
+ "authentication.permissions.permanent_delete_user.description",
+ }
+ PERMISSION_UPLOAD_FILE = &Permission{
+ "upload_file",
+ "authentication.permissions.upload_file.name",
+ "authentication.permissions.upload_file.description",
+ }
+ PERMISSION_GET_PUBLIC_LINK = &Permission{
+ "get_public_link",
+ "authentication.permissions.get_public_link.name",
+ "authentication.permissions.get_public_link.description",
+ }
+ PERMISSION_MANAGE_WEBHOOKS = &Permission{
+ "manage_webhooks",
+ "authentication.permissions.manage_webhooks.name",
+ "authentication.permissions.manage_webhooks.description",
+ }
+ PERMISSION_MANAGE_OTHERS_WEBHOOKS = &Permission{
+ "manage_others_webhooks",
+ "authentication.permissions.manage_others_webhooks.name",
+ "authentication.permissions.manage_others_webhooks.description",
+ }
+ PERMISSION_MANAGE_OAUTH = &Permission{
+ "manage_oauth",
+ "authentication.permissions.manage_oauth.name",
+ "authentication.permissions.manage_oauth.description",
+ }
+ PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH = &Permission{
+ "manage_sytem_wide_oauth",
+ "authentication.permissions.manage_sytem_wide_oauth.name",
+ "authentication.permissions.manage_sytem_wide_oauth.description",
+ }
+ PERMISSION_CREATE_POST = &Permission{
+ "create_post",
+ "authentication.permissions.create_post.name",
+ "authentication.permissions.create_post.description",
+ }
+ PERMISSION_CREATE_POST_PUBLIC = &Permission{
+ "create_post_public",
+ "authentication.permissions.create_post_public.name",
+ "authentication.permissions.create_post_public.description",
+ }
+ PERMISSION_EDIT_POST = &Permission{
+ "edit_post",
+ "authentication.permissions.edit_post.name",
+ "authentication.permissions.edit_post.description",
+ }
+ PERMISSION_EDIT_OTHERS_POSTS = &Permission{
+ "edit_others_posts",
+ "authentication.permissions.edit_others_posts.name",
+ "authentication.permissions.edit_others_posts.description",
+ }
+ PERMISSION_DELETE_POST = &Permission{
+ "delete_post",
+ "authentication.permissions.delete_post.name",
+ "authentication.permissions.delete_post.description",
+ }
+ PERMISSION_DELETE_OTHERS_POSTS = &Permission{
+ "delete_others_posts",
+ "authentication.permissions.delete_others_posts.name",
+ "authentication.permissions.delete_others_posts.description",
+ }
+ PERMISSION_REMOVE_USER_FROM_TEAM = &Permission{
+ "remove_user_from_team",
+ "authentication.permissions.remove_user_from_team.name",
+ "authentication.permissions.remove_user_from_team.description",
+ }
+ PERMISSION_CREATE_TEAM = &Permission{
+ "create_team",
+ "authentication.permissions.create_team.name",
+ "authentication.permissions.create_team.description",
+ }
+ PERMISSION_MANAGE_TEAM = &Permission{
+ "manage_team",
+ "authentication.permissions.manage_team.name",
+ "authentication.permissions.manage_team.description",
+ }
+ PERMISSION_IMPORT_TEAM = &Permission{
+ "import_team",
+ "authentication.permissions.import_team.name",
+ "authentication.permissions.import_team.description",
+ }
+ PERMISSION_VIEW_TEAM = &Permission{
+ "view_team",
+ "authentication.permissions.view_team.name",
+ "authentication.permissions.view_team.description",
+ }
+ PERMISSION_LIST_USERS_WITHOUT_TEAM = &Permission{
+ "list_users_without_team",
+ "authentication.permissions.list_users_without_team.name",
+ "authentication.permissions.list_users_without_team.description",
+ }
+ PERMISSION_CREATE_USER_ACCESS_TOKEN = &Permission{
+ "create_user_access_token",
+ "authentication.permissions.create_user_access_token.name",
+ "authentication.permissions.create_user_access_token.description",
+ }
+ PERMISSION_READ_USER_ACCESS_TOKEN = &Permission{
+ "read_user_access_token",
+ "authentication.permissions.read_user_access_token.name",
+ "authentication.permissions.read_user_access_token.description",
+ }
+ PERMISSION_REVOKE_USER_ACCESS_TOKEN = &Permission{
+ "revoke_user_access_token",
+ "authentication.permissions.revoke_user_access_token.name",
+ "authentication.permissions.revoke_user_access_token.description",
+ }
+ PERMISSION_MANAGE_JOBS = &Permission{
+ "manage_jobs",
+ "authentication.permisssions.manage_jobs.name",
+ "authentication.permisssions.manage_jobs.description",
+ }
+}
+
+var DefaultRoles map[string]*Role
+
+func initializeDefaultRoles() {
+ DefaultRoles = make(map[string]*Role)
+
+ DefaultRoles[CHANNEL_USER_ROLE_ID] = &Role{
+ "channel_user",
+ "authentication.roles.channel_user.name",
+ "authentication.roles.channel_user.description",
+ []string{
+ PERMISSION_READ_CHANNEL.Id,
+ PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id,
+ PERMISSION_UPLOAD_FILE.Id,
+ PERMISSION_GET_PUBLIC_LINK.Id,
+ PERMISSION_CREATE_POST.Id,
+ PERMISSION_EDIT_POST.Id,
+ PERMISSION_USE_SLASH_COMMANDS.Id,
+ },
+ }
+
+ DefaultRoles[CHANNEL_ADMIN_ROLE_ID] = &Role{
+ "channel_admin",
+ "authentication.roles.channel_admin.name",
+ "authentication.roles.channel_admin.description",
+ []string{
+ PERMISSION_MANAGE_CHANNEL_ROLES.Id,
+ },
+ }
+
+ DefaultRoles[CHANNEL_GUEST_ROLE_ID] = &Role{
+ "guest",
+ "authentication.roles.global_guest.name",
+ "authentication.roles.global_guest.description",
+ []string{},
+ }
+
+ DefaultRoles[TEAM_USER_ROLE_ID] = &Role{
+ "team_user",
+ "authentication.roles.team_user.name",
+ "authentication.roles.team_user.description",
+ []string{
+ PERMISSION_LIST_TEAM_CHANNELS.Id,
+ PERMISSION_JOIN_PUBLIC_CHANNELS.Id,
+ PERMISSION_READ_PUBLIC_CHANNEL.Id,
+ PERMISSION_VIEW_TEAM.Id,
+ },
+ }
+
+ DefaultRoles[TEAM_POST_ALL_ROLE_ID] = &Role{
+ "team_post_all",
+ "authentication.roles.team_post_all.name",
+ "authentication.roles.team_post_all.description",
+ []string{
+ PERMISSION_CREATE_POST.Id,
+ },
+ }
+
+ DefaultRoles[TEAM_POST_ALL_PUBLIC_ROLE_ID] = &Role{
+ "team_post_all_public",
+ "authentication.roles.team_post_all_public.name",
+ "authentication.roles.team_post_all_public.description",
+ []string{
+ PERMISSION_CREATE_POST_PUBLIC.Id,
+ },
+ }
+
+ DefaultRoles[TEAM_ADMIN_ROLE_ID] = &Role{
+ "team_admin",
+ "authentication.roles.team_admin.name",
+ "authentication.roles.team_admin.description",
+ []string{
+ PERMISSION_EDIT_OTHERS_POSTS.Id,
+ PERMISSION_REMOVE_USER_FROM_TEAM.Id,
+ PERMISSION_MANAGE_TEAM.Id,
+ PERMISSION_IMPORT_TEAM.Id,
+ PERMISSION_MANAGE_TEAM_ROLES.Id,
+ PERMISSION_MANAGE_CHANNEL_ROLES.Id,
+ PERMISSION_MANAGE_OTHERS_WEBHOOKS.Id,
+ PERMISSION_MANAGE_SLASH_COMMANDS.Id,
+ PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS.Id,
+ PERMISSION_MANAGE_WEBHOOKS.Id,
+ },
+ }
+
+ DefaultRoles[SYSTEM_USER_ROLE_ID] = &Role{
+ "system_user",
+ "authentication.roles.global_user.name",
+ "authentication.roles.global_user.description",
+ []string{
+ PERMISSION_CREATE_DIRECT_CHANNEL.Id,
+ PERMISSION_CREATE_GROUP_CHANNEL.Id,
+ PERMISSION_PERMANENT_DELETE_USER.Id,
+ },
+ }
+
+ DefaultRoles[SYSTEM_POST_ALL_ROLE_ID] = &Role{
+ "system_post_all",
+ "authentication.roles.system_post_all.name",
+ "authentication.roles.system_post_all.description",
+ []string{
+ PERMISSION_CREATE_POST.Id,
+ },
+ }
+
+ DefaultRoles[SYSTEM_POST_ALL_PUBLIC_ROLE_ID] = &Role{
+ "system_post_all_public",
+ "authentication.roles.system_post_all_public.name",
+ "authentication.roles.system_post_all_public.description",
+ []string{
+ PERMISSION_CREATE_POST_PUBLIC.Id,
+ },
+ }
+
+ DefaultRoles[SYSTEM_USER_ACCESS_TOKEN_ROLE_ID] = &Role{
+ "system_user_access_token",
+ "authentication.roles.system_user_access_token.name",
+ "authentication.roles.system_user_access_token.description",
+ []string{
+ PERMISSION_CREATE_USER_ACCESS_TOKEN.Id,
+ PERMISSION_READ_USER_ACCESS_TOKEN.Id,
+ PERMISSION_REVOKE_USER_ACCESS_TOKEN.Id,
+ },
+ }
+
+ DefaultRoles[SYSTEM_ADMIN_ROLE_ID] = &Role{
+ "system_admin",
+ "authentication.roles.global_admin.name",
+ "authentication.roles.global_admin.description",
+ // System admins can do anything channel and team admins can do
+ // plus everything members of teams and channels can do to all teams
+ // and channels on the system
+ append(
+ append(
+ append(
+ append(
+ []string{
+ PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE.Id,
+ PERMISSION_MANAGE_SYSTEM.Id,
+ PERMISSION_MANAGE_ROLES.Id,
+ PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES.Id,
+ PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id,
+ PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS.Id,
+ PERMISSION_DELETE_PUBLIC_CHANNEL.Id,
+ PERMISSION_CREATE_PUBLIC_CHANNEL.Id,
+ PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES.Id,
+ PERMISSION_DELETE_PRIVATE_CHANNEL.Id,
+ PERMISSION_CREATE_PRIVATE_CHANNEL.Id,
+ PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH.Id,
+ PERMISSION_MANAGE_OTHERS_WEBHOOKS.Id,
+ PERMISSION_EDIT_OTHER_USERS.Id,
+ PERMISSION_MANAGE_OAUTH.Id,
+ PERMISSION_INVITE_USER.Id,
+ PERMISSION_DELETE_POST.Id,
+ PERMISSION_DELETE_OTHERS_POSTS.Id,
+ PERMISSION_CREATE_TEAM.Id,
+ PERMISSION_ADD_USER_TO_TEAM.Id,
+ PERMISSION_LIST_USERS_WITHOUT_TEAM.Id,
+ PERMISSION_MANAGE_JOBS.Id,
+ PERMISSION_CREATE_POST_PUBLIC.Id,
+ PERMISSION_CREATE_USER_ACCESS_TOKEN.Id,
+ PERMISSION_READ_USER_ACCESS_TOKEN.Id,
+ PERMISSION_REVOKE_USER_ACCESS_TOKEN.Id,
+ },
+ DefaultRoles[TEAM_USER_ROLE_ID].Permissions...,
+ ),
+ DefaultRoles[CHANNEL_USER_ROLE_ID].Permissions...,
+ ),
+ DefaultRoles[TEAM_ADMIN_ROLE_ID].Permissions...,
+ ),
+ DefaultRoles[CHANNEL_ADMIN_ROLE_ID].Permissions...,
+ ),
+ }
+}
+
+func RoleIdsToString(roles []string) string {
+ output := ""
+ for _, role := range roles {
+ output += role + ", "
+ }
+
+ if output == "" {
+ return "[<NO ROLES>]"
+ }
+
+ return output[:len(output)-2] // trim the trailing ", " separator
+}
+
+func init() {
+ initializePermissions()
+ initializeDefaultRoles()
+}
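
The permission and role tables above are populated at package load time, so callers can consult DefaultRoles directly. An illustrative sketch of looking up a default role and checking one of its permission ids; roleHasPermission is a hypothetical helper, not part of the package:

    package example

    import (
        "fmt"

        "github.com/mattermost/mattermost-server/model"
    )

    // roleHasPermission reports whether a default role carries the given
    // permission id, using the tables built by the package init above.
    func roleHasPermission(roleId, permissionId string) bool {
        role, ok := model.DefaultRoles[roleId]
        if !ok {
            return false
        }
        for _, id := range role.Permissions {
            if id == permissionId {
                return true
            }
        }
        return false
    }

    func describeChannelUser() {
        perms := model.DefaultRoles[model.CHANNEL_USER_ROLE_ID].Permissions
        fmt.Println(model.RoleIdsToString(perms))
        fmt.Println(roleHasPermission(model.CHANNEL_USER_ROLE_ID, model.PERMISSION_CREATE_POST.Id))
    }
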
diff --git a/vendor/github.com/mattermost/mattermost-server/model/authorize.go b/vendor/github.com/mattermost/mattermost-server/model/authorize.go
new file mode 100644
index 00000000..2296e7e2
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/authorize.go
@@ -0,0 +1,141 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+)
+
+const (
+ AUTHCODE_EXPIRE_TIME = 60 * 10 // 10 minutes
+ AUTHCODE_RESPONSE_TYPE = "code"
+ DEFAULT_SCOPE = "user"
+)
+
+type AuthData struct {
+ ClientId string `json:"client_id"`
+ UserId string `json:"user_id"`
+ Code string `json:"code"`
+ ExpiresIn int32 `json:"expires_in"`
+ CreateAt int64 `json:"create_at"`
+ RedirectUri string `json:"redirect_uri"`
+ State string `json:"state"`
+ Scope string `json:"scope"`
+}
+
+type AuthorizeRequest struct {
+ ResponseType string `json:"response_type"`
+ ClientId string `json:"client_id"`
+ RedirectUri string `json:"redirect_uri"`
+ Scope string `json:"scope"`
+ State string `json:"state"`
+}
+
+// IsValid validates the AuthData and returns an error if it isn't configured
+// correctly.
+func (ad *AuthData) IsValid() *AppError {
+
+ if len(ad.ClientId) != 26 {
+ return NewAppError("AuthData.IsValid", "model.authorize.is_valid.client_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(ad.UserId) != 26 {
+ return NewAppError("AuthData.IsValid", "model.authorize.is_valid.user_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(ad.Code) == 0 || len(ad.Code) > 128 {
+ return NewAppError("AuthData.IsValid", "model.authorize.is_valid.auth_code.app_error", nil, "client_id="+ad.ClientId, http.StatusBadRequest)
+ }
+
+ if ad.ExpiresIn == 0 {
+ return NewAppError("AuthData.IsValid", "model.authorize.is_valid.expires.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if ad.CreateAt <= 0 {
+ return NewAppError("AuthData.IsValid", "model.authorize.is_valid.create_at.app_error", nil, "client_id="+ad.ClientId, http.StatusBadRequest)
+ }
+
+ if len(ad.RedirectUri) == 0 || len(ad.RedirectUri) > 256 || !IsValidHttpUrl(ad.RedirectUri) {
+ return NewAppError("AuthData.IsValid", "model.authorize.is_valid.redirect_uri.app_error", nil, "client_id="+ad.ClientId, http.StatusBadRequest)
+ }
+
+ if len(ad.State) > 128 {
+ return NewAppError("AuthData.IsValid", "model.authorize.is_valid.state.app_error", nil, "client_id="+ad.ClientId, http.StatusBadRequest)
+ }
+
+ if len(ad.Scope) > 128 {
+ return NewAppError("AuthData.IsValid", "model.authorize.is_valid.scope.app_error", nil, "client_id="+ad.ClientId, http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+// IsValid validates the AuthorizeRequest and returns an error if it isn't configured
+// correctly.
+func (ar *AuthorizeRequest) IsValid() *AppError {
+
+ if len(ar.ClientId) != 26 {
+ return NewAppError("AuthData.IsValid", "model.authorize.is_valid.client_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(ar.ResponseType) == 0 {
+ return NewAppError("AuthData.IsValid", "model.authorize.is_valid.response_type.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(ar.RedirectUri) == 0 || len(ar.RedirectUri) > 256 || !IsValidHttpUrl(ar.RedirectUri) {
+ return NewAppError("AuthData.IsValid", "model.authorize.is_valid.redirect_uri.app_error", nil, "client_id="+ar.ClientId, http.StatusBadRequest)
+ }
+
+ if len(ar.State) > 128 {
+ return NewAppError("AuthData.IsValid", "model.authorize.is_valid.state.app_error", nil, "client_id="+ar.ClientId, http.StatusBadRequest)
+ }
+
+ if len(ar.Scope) > 128 {
+ return NewAppError("AuthData.IsValid", "model.authorize.is_valid.scope.app_error", nil, "client_id="+ar.ClientId, http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (ad *AuthData) PreSave() {
+ if ad.ExpiresIn == 0 {
+ ad.ExpiresIn = AUTHCODE_EXPIRE_TIME
+ }
+
+ if ad.CreateAt == 0 {
+ ad.CreateAt = GetMillis()
+ }
+
+ if len(ad.Scope) == 0 {
+ ad.Scope = DEFAULT_SCOPE
+ }
+}
+
+func (ad *AuthData) ToJson() string {
+ b, _ := json.Marshal(ad)
+ return string(b)
+}
+
+func AuthDataFromJson(data io.Reader) *AuthData {
+ var ad *AuthData
+ json.NewDecoder(data).Decode(&ad)
+ return ad
+}
+
+func (ar *AuthorizeRequest) ToJson() string {
+ b, _ := json.Marshal(ar)
+ return string(b)
+}
+
+func AuthorizeRequestFromJson(data io.Reader) *AuthorizeRequest {
+ var ar *AuthorizeRequest
+ json.NewDecoder(data).Decode(&ar)
+ return ar
+}
+
+func (ad *AuthData) IsExpired() bool {
+ return GetMillis() > ad.CreateAt+int64(ad.ExpiresIn*1000)
+}
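
PreSave fills in the expiry window, creation time and default scope, so the usual sequence is populate, PreSave, then IsValid. A short illustrative sketch under that assumption; the redirect URI is a placeholder and clientId/userId are expected to be 26-character ids:

    package example

    import (
        "fmt"

        "github.com/mattermost/mattermost-server/model"
    )

    // newAuthData shows the populate / PreSave / IsValid sequence for an
    // authorization-code record.
    func newAuthData(clientId, userId string) *model.AuthData {
        ad := &model.AuthData{
            ClientId:    clientId,
            UserId:      userId,
            Code:        model.NewId(),
            RedirectUri: "https://example.com/oauth/complete", // placeholder URI
        }
        ad.PreSave() // fills ExpiresIn, CreateAt and the default scope

        if appErr := ad.IsValid(); appErr != nil {
            fmt.Println("auth data failed validation")
            return nil
        }
        return ad
    }
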
diff --git a/vendor/github.com/mattermost/mattermost-server/model/builtin.go b/vendor/github.com/mattermost/mattermost-server/model/builtin.go
new file mode 100644
index 00000000..5dd00a96
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/builtin.go
@@ -0,0 +1,9 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+func NewBool(b bool) *bool { return &b }
+func NewInt(n int) *int { return &n }
+func NewInt64(n int64) *int64 { return &n }
+func NewString(s string) *string { return &s }
diff --git a/vendor/github.com/mattermost/mattermost-server/model/bundle_info.go b/vendor/github.com/mattermost/mattermost-server/model/bundle_info.go
new file mode 100644
index 00000000..6965159c
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/bundle_info.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+type BundleInfo struct {
+ Path string
+
+ Manifest *Manifest
+ ManifestPath string
+ ManifestError error
+}
+
+// BundleInfoForPath returns bundle info for the given path. The return value is never nil.
+func BundleInfoForPath(path string) *BundleInfo {
+ m, mpath, err := FindManifest(path)
+ return &BundleInfo{
+ Path: path,
+ Manifest: m,
+ ManifestPath: mpath,
+ ManifestError: err,
+ }
+}
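
BundleInfoForPath never returns nil, so callers only need to inspect ManifestError. A brief illustrative sketch of that error handling; loadBundle is a hypothetical wrapper:

    package example

    import (
        "fmt"

        "github.com/mattermost/mattermost-server/model"
    )

    // loadBundle relies on BundleInfoForPath always returning a value: the
    // caller only has to check ManifestError to know whether a manifest was found.
    func loadBundle(path string) *model.BundleInfo {
        info := model.BundleInfoForPath(path)
        if info.ManifestError != nil {
            fmt.Println("no usable manifest at", info.Path)
        }
        return info
    }
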
diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel.go b/vendor/github.com/mattermost/mattermost-server/model/channel.go
new file mode 100644
index 00000000..ce812be3
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/channel.go
@@ -0,0 +1,208 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+ "encoding/json"
+ "io"
+ "net/http"
+ "sort"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+ CHANNEL_OPEN = "O"
+ CHANNEL_PRIVATE = "P"
+ CHANNEL_DIRECT = "D"
+ CHANNEL_GROUP = "G"
+ CHANNEL_GROUP_MAX_USERS = 8
+ CHANNEL_GROUP_MIN_USERS = 3
+ DEFAULT_CHANNEL = "town-square"
+ CHANNEL_DISPLAY_NAME_MAX_RUNES = 64
+ CHANNEL_NAME_MIN_LENGTH = 2
+ CHANNEL_NAME_MAX_LENGTH = 64
+ CHANNEL_NAME_UI_MAX_LENGTH = 22
+ CHANNEL_HEADER_MAX_RUNES = 1024
+ CHANNEL_PURPOSE_MAX_RUNES = 250
+ CHANNEL_CACHE_SIZE = 25000
+)
+
+type Channel struct {
+ Id string `json:"id"`
+ CreateAt int64 `json:"create_at"`
+ UpdateAt int64 `json:"update_at"`
+ DeleteAt int64 `json:"delete_at"`
+ TeamId string `json:"team_id"`
+ Type string `json:"type"`
+ DisplayName string `json:"display_name"`
+ Name string `json:"name"`
+ Header string `json:"header"`
+ Purpose string `json:"purpose"`
+ LastPostAt int64 `json:"last_post_at"`
+ TotalMsgCount int64 `json:"total_msg_count"`
+ ExtraUpdateAt int64 `json:"extra_update_at"`
+ CreatorId string `json:"creator_id"`
+}
+
+type ChannelPatch struct {
+ DisplayName *string `json:"display_name"`
+ Name *string `json:"name"`
+ Header *string `json:"header"`
+ Purpose *string `json:"purpose"`
+}
+
+func (o *Channel) DeepCopy() *Channel {
+ copy := *o
+ return &copy
+}
+
+func (o *Channel) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func (o *ChannelPatch) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func ChannelFromJson(data io.Reader) *Channel {
+ var o *Channel
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func ChannelPatchFromJson(data io.Reader) *ChannelPatch {
+ var o *ChannelPatch
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func (o *Channel) Etag() string {
+ return Etag(o.Id, o.UpdateAt)
+}
+
+func (o *Channel) StatsEtag() string {
+ return Etag(o.Id, o.ExtraUpdateAt)
+}
+
+func (o *Channel) IsValid() *AppError {
+
+ if len(o.Id) != 26 {
+ return NewAppError("Channel.IsValid", "model.channel.is_valid.id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if o.CreateAt == 0 {
+ return NewAppError("Channel.IsValid", "model.channel.is_valid.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if o.UpdateAt == 0 {
+ return NewAppError("Channel.IsValid", "model.channel.is_valid.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if utf8.RuneCountInString(o.DisplayName) > CHANNEL_DISPLAY_NAME_MAX_RUNES {
+ return NewAppError("Channel.IsValid", "model.channel.is_valid.display_name.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if !IsValidChannelIdentifier(o.Name) {
+ return NewAppError("Channel.IsValid", "model.channel.is_valid.2_or_more.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if !(o.Type == CHANNEL_OPEN || o.Type == CHANNEL_PRIVATE || o.Type == CHANNEL_DIRECT || o.Type == CHANNEL_GROUP) {
+ return NewAppError("Channel.IsValid", "model.channel.is_valid.type.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if utf8.RuneCountInString(o.Header) > CHANNEL_HEADER_MAX_RUNES {
+ return NewAppError("Channel.IsValid", "model.channel.is_valid.header.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if utf8.RuneCountInString(o.Purpose) > CHANNEL_PURPOSE_MAX_RUNES {
+ return NewAppError("Channel.IsValid", "model.channel.is_valid.purpose.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if len(o.CreatorId) > 26 {
+ return NewAppError("Channel.IsValid", "model.channel.is_valid.creator_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (o *Channel) PreSave() {
+ if o.Id == "" {
+ o.Id = NewId()
+ }
+
+ o.CreateAt = GetMillis()
+ o.UpdateAt = o.CreateAt
+ o.ExtraUpdateAt = o.CreateAt
+}
+
+func (o *Channel) PreUpdate() {
+ o.UpdateAt = GetMillis()
+}
+
+func (o *Channel) ExtraUpdated() {
+ o.ExtraUpdateAt = GetMillis()
+}
+
+func (o *Channel) IsGroupOrDirect() bool {
+ return o.Type == CHANNEL_DIRECT || o.Type == CHANNEL_GROUP
+}
+
+func (o *Channel) Patch(patch *ChannelPatch) {
+ if patch.DisplayName != nil {
+ o.DisplayName = *patch.DisplayName
+ }
+
+ if patch.Name != nil {
+ o.Name = *patch.Name
+ }
+
+ if patch.Header != nil {
+ o.Header = *patch.Header
+ }
+
+ if patch.Purpose != nil {
+ o.Purpose = *patch.Purpose
+ }
+}
+
+func GetDMNameFromIds(userId1, userId2 string) string {
+ if userId1 > userId2 {
+ return userId2 + "__" + userId1
+ } else {
+ return userId1 + "__" + userId2
+ }
+}
+
+func GetGroupDisplayNameFromUsers(users []*User, truncate bool) string {
+ usernames := make([]string, len(users))
+ for index, user := range users {
+ usernames[index] = user.Username
+ }
+
+ sort.Strings(usernames)
+
+ name := strings.Join(usernames, ", ")
+
+ if truncate && len(name) > CHANNEL_NAME_MAX_LENGTH {
+ name = name[:CHANNEL_NAME_MAX_LENGTH]
+ }
+
+ return name
+}
+
+func GetGroupNameFromUserIds(userIds []string) string {
+ sort.Strings(userIds)
+
+ h := sha1.New()
+ for _, id := range userIds {
+ io.WriteString(h, id)
+ }
+
+ return hex.EncodeToString(h.Sum(nil))
+}
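
A ChannelPatch applies only its non-nil fields, and PreSave/PreUpdate stamp the timestamps. An illustrative sketch of that create-then-patch sequence; the channel names are placeholders:

    package example

    import (
        "github.com/mattermost/mattermost-server/model"
    )

    // renameChannel shows the PreSave / Patch / PreUpdate sequence; only the
    // non-nil fields of the patch are applied.
    func renameChannel() *model.Channel {
        channel := &model.Channel{
            TeamId:      model.NewId(),
            Type:        model.CHANNEL_OPEN,
            DisplayName: "Town Square",
            Name:        "town-square",
        }
        channel.PreSave() // assigns an id and the create/update timestamps

        patch := &model.ChannelPatch{
            DisplayName: model.NewString("Announcements"), // only this field changes
        }
        channel.Patch(patch)
        channel.PreUpdate()

        return channel
    }
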
diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_count.go b/vendor/github.com/mattermost/mattermost-server/model/channel_count.go
new file mode 100644
index 00000000..8c6d8dd0
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/channel_count.go
@@ -0,0 +1,54 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "crypto/md5"
+ "encoding/json"
+ "fmt"
+ "io"
+ "sort"
+ "strconv"
+)
+
+type ChannelCounts struct {
+ Counts map[string]int64 `json:"counts"`
+ UpdateTimes map[string]int64 `json:"update_times"`
+}
+
+func (o *ChannelCounts) Etag() string {
+
+ ids := []string{}
+ for id := range o.Counts {
+ ids = append(ids, id)
+ }
+ sort.Strings(ids)
+
+ str := ""
+ for _, id := range ids {
+ str += id + strconv.FormatInt(o.Counts[id], 10)
+ }
+
+ md5Counts := fmt.Sprintf("%x", md5.Sum([]byte(str)))
+
+ var update int64 = 0
+ for _, u := range o.UpdateTimes {
+ if u > update {
+ update = u
+ }
+ }
+
+ return Etag(md5Counts, update)
+}
+
+func (o *ChannelCounts) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func ChannelCountsFromJson(data io.Reader) *ChannelCounts {
+ var o *ChannelCounts
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_data.go b/vendor/github.com/mattermost/mattermost-server/model/channel_data.go
new file mode 100644
index 00000000..aae0a149
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/channel_data.go
@@ -0,0 +1,34 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type ChannelData struct {
+ Channel *Channel `json:"channel"`
+ Member *ChannelMember `json:"member"`
+}
+
+func (o *ChannelData) Etag() string {
+ var mt int64 = 0
+ if o.Member != nil {
+ mt = o.Member.LastUpdateAt
+ }
+
+ return Etag(o.Channel.Id, o.Channel.UpdateAt, o.Channel.LastPostAt, mt)
+}
+
+func (o *ChannelData) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func ChannelDataFromJson(data io.Reader) *ChannelData {
+ var o *ChannelData
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_list.go b/vendor/github.com/mattermost/mattermost-server/model/channel_list.go
new file mode 100644
index 00000000..1b3bda46
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/channel_list.go
@@ -0,0 +1,53 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type ChannelList []*Channel
+
+func (o *ChannelList) ToJson() string {
+ if b, err := json.Marshal(o); err != nil {
+ return "[]"
+ } else {
+ return string(b)
+ }
+}
+
+func (o *ChannelList) Etag() string {
+
+ id := "0"
+ var t int64 = 0
+ var delta int64 = 0
+
+ for _, v := range *o {
+ if v.LastPostAt > t {
+ t = v.LastPostAt
+ id = v.Id
+ }
+
+ if v.UpdateAt > t {
+ t = v.UpdateAt
+ id = v.Id
+ }
+
+ }
+
+ return Etag(id, t, delta, len(*o))
+}
+
+func ChannelListFromJson(data io.Reader) *ChannelList {
+ var o *ChannelList
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func ChannelSliceFromJson(data io.Reader) []*Channel {
+ var o []*Channel
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_member.go b/vendor/github.com/mattermost/mattermost-server/model/channel_member.go
new file mode 100644
index 00000000..e9895aea
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/channel_member.go
@@ -0,0 +1,148 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "strings"
+)
+
+const (
+ CHANNEL_NOTIFY_DEFAULT = "default"
+ CHANNEL_NOTIFY_ALL = "all"
+ CHANNEL_NOTIFY_MENTION = "mention"
+ CHANNEL_NOTIFY_NONE = "none"
+ CHANNEL_MARK_UNREAD_ALL = "all"
+ CHANNEL_MARK_UNREAD_MENTION = "mention"
+)
+
+type ChannelUnread struct {
+ TeamId string `json:"team_id"`
+ ChannelId string `json:"channel_id"`
+ MsgCount int64 `json:"msg_count"`
+ MentionCount int64 `json:"mention_count"`
+ NotifyProps StringMap `json:"-"`
+}
+
+type ChannelMember struct {
+ ChannelId string `json:"channel_id"`
+ UserId string `json:"user_id"`
+ Roles string `json:"roles"`
+ LastViewedAt int64 `json:"last_viewed_at"`
+ MsgCount int64 `json:"msg_count"`
+ MentionCount int64 `json:"mention_count"`
+ NotifyProps StringMap `json:"notify_props"`
+ LastUpdateAt int64 `json:"last_update_at"`
+}
+
+type ChannelMembers []ChannelMember
+
+func (o *ChannelMembers) ToJson() string {
+ if b, err := json.Marshal(o); err != nil {
+ return "[]"
+ } else {
+ return string(b)
+ }
+}
+
+func (o *ChannelUnread) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func ChannelMembersFromJson(data io.Reader) *ChannelMembers {
+ var o *ChannelMembers
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func ChannelUnreadFromJson(data io.Reader) *ChannelUnread {
+ var o *ChannelUnread
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func (o *ChannelMember) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func ChannelMemberFromJson(data io.Reader) *ChannelMember {
+ var o *ChannelMember
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func (o *ChannelMember) IsValid() *AppError {
+
+ if len(o.ChannelId) != 26 {
+ return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.UserId) != 26 {
+ return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.user_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ notifyLevel := o.NotifyProps[DESKTOP_NOTIFY_PROP]
+ if len(notifyLevel) > 20 || !IsChannelNotifyLevelValid(notifyLevel) {
+ return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.notify_level.app_error", nil, "notify_level="+notifyLevel, http.StatusBadRequest)
+ }
+
+ markUnreadLevel := o.NotifyProps[MARK_UNREAD_NOTIFY_PROP]
+ if len(markUnreadLevel) > 20 || !IsChannelMarkUnreadLevelValid(markUnreadLevel) {
+ return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.unread_level.app_error", nil, "mark_unread_level="+markUnreadLevel, http.StatusBadRequest)
+ }
+
+ if pushLevel, ok := o.NotifyProps[PUSH_NOTIFY_PROP]; ok {
+ if len(pushLevel) > 20 || !IsChannelNotifyLevelValid(pushLevel) {
+ return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.push_level.app_error", nil, "push_notification_level="+pushLevel, http.StatusBadRequest)
+ }
+ }
+
+ if sendEmail, ok := o.NotifyProps[EMAIL_NOTIFY_PROP]; ok {
+ if len(sendEmail) > 20 || !IsSendEmailValid(sendEmail) {
+ return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.email_value.app_error", nil, "push_notification_level="+sendEmail, http.StatusBadRequest)
+ }
+ }
+
+ return nil
+}
+
+func (o *ChannelMember) PreSave() {
+ o.LastUpdateAt = GetMillis()
+}
+
+func (o *ChannelMember) PreUpdate() {
+ o.LastUpdateAt = GetMillis()
+}
+
+func (o *ChannelMember) GetRoles() []string {
+ return strings.Fields(o.Roles)
+}
+
+func IsChannelNotifyLevelValid(notifyLevel string) bool {
+ return notifyLevel == CHANNEL_NOTIFY_DEFAULT ||
+ notifyLevel == CHANNEL_NOTIFY_ALL ||
+ notifyLevel == CHANNEL_NOTIFY_MENTION ||
+ notifyLevel == CHANNEL_NOTIFY_NONE
+}
+
+func IsChannelMarkUnreadLevelValid(markUnreadLevel string) bool {
+ return markUnreadLevel == CHANNEL_MARK_UNREAD_ALL || markUnreadLevel == CHANNEL_MARK_UNREAD_MENTION
+}
+
+func IsSendEmailValid(sendEmail string) bool {
+ return sendEmail == CHANNEL_NOTIFY_DEFAULT || sendEmail == "true" || sendEmail == "false"
+}
+
+func GetDefaultChannelNotifyProps() StringMap {
+ return StringMap{
+ DESKTOP_NOTIFY_PROP: CHANNEL_NOTIFY_DEFAULT,
+ MARK_UNREAD_NOTIFY_PROP: CHANNEL_MARK_UNREAD_ALL,
+ PUSH_NOTIFY_PROP: CHANNEL_NOTIFY_DEFAULT,
+ EMAIL_NOTIFY_PROP: CHANNEL_NOTIFY_DEFAULT,
+ }
+}
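
GetDefaultChannelNotifyProps pairs naturally with the per-property checks in IsValid. An illustrative sketch of building a member with default notification settings and validating it; newChannelMember is a hypothetical helper:

    package example

    import (
        "fmt"

        "github.com/mattermost/mattermost-server/model"
    )

    // newChannelMember builds a membership record with the default notification
    // settings, stamps it, and runs the validation shown above.
    func newChannelMember(channelId, userId string) *model.ChannelMember {
        member := &model.ChannelMember{
            ChannelId:   channelId,
            UserId:      userId,
            NotifyProps: model.GetDefaultChannelNotifyProps(),
        }
        member.PreSave() // sets LastUpdateAt

        if appErr := member.IsValid(); appErr != nil {
            fmt.Println("channel member failed validation")
            return nil
        }
        return member
    }
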
diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_member_history.go b/vendor/github.com/mattermost/mattermost-server/model/channel_member_history.go
new file mode 100644
index 00000000..47c59d54
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/channel_member_history.go
@@ -0,0 +1,15 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+type ChannelMemberHistory struct {
+ ChannelId string
+ UserId string
+ JoinTime int64
+ LeaveTime *int64
+
+ // These two fields are never set in the database; when we SELECT, we join on Users to populate them
+ UserEmail string `db:"Email"`
+ Username string
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_search.go b/vendor/github.com/mattermost/mattermost-server/model/channel_search.go
new file mode 100644
index 00000000..593cf669
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/channel_search.go
@@ -0,0 +1,26 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type ChannelSearch struct {
+ Term string `json:"term"`
+}
+
+// ToJson converts a ChannelSearch to a JSON string
+func (c *ChannelSearch) ToJson() string {
+ b, _ := json.Marshal(c)
+ return string(b)
+}
+
+// ChannelSearchFromJson will decode the input and return a ChannelSearch
+func ChannelSearchFromJson(data io.Reader) *ChannelSearch {
+ var cs *ChannelSearch
+ json.NewDecoder(data).Decode(&cs)
+ return cs
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_stats.go b/vendor/github.com/mattermost/mattermost-server/model/channel_stats.go
new file mode 100644
index 00000000..21af920f
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/channel_stats.go
@@ -0,0 +1,25 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type ChannelStats struct {
+ ChannelId string `json:"channel_id"`
+ MemberCount int64 `json:"member_count"`
+}
+
+func (o *ChannelStats) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func ChannelStatsFromJson(data io.Reader) *ChannelStats {
+ var o *ChannelStats
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_view.go b/vendor/github.com/mattermost/mattermost-server/model/channel_view.go
new file mode 100644
index 00000000..650d14ce
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/channel_view.go
@@ -0,0 +1,41 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type ChannelView struct {
+ ChannelId string `json:"channel_id"`
+ PrevChannelId string `json:"prev_channel_id"`
+}
+
+func (o *ChannelView) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func ChannelViewFromJson(data io.Reader) *ChannelView {
+ var o *ChannelView
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+type ChannelViewResponse struct {
+ Status string `json:"status"`
+ LastViewedAtTimes map[string]int64 `json:"last_viewed_at_times"`
+}
+
+func (o *ChannelViewResponse) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func ChannelViewResponseFromJson(data io.Reader) *ChannelViewResponse {
+ var o *ChannelViewResponse
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/client.go b/vendor/github.com/mattermost/mattermost-server/model/client.go
new file mode 100644
index 00000000..ef890b59
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/client.go
@@ -0,0 +1,2379 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ l4g "github.com/alecthomas/log4go"
+)
+
+var UsedApiV3 *int32 = new(int32)
+
+const (
+ HEADER_REQUEST_ID = "X-Request-ID"
+ HEADER_VERSION_ID = "X-Version-ID"
+ HEADER_CLUSTER_ID = "X-Cluster-ID"
+ HEADER_ETAG_SERVER = "ETag"
+ HEADER_ETAG_CLIENT = "If-None-Match"
+ HEADER_FORWARDED = "X-Forwarded-For"
+ HEADER_REAL_IP = "X-Real-IP"
+ HEADER_FORWARDED_PROTO = "X-Forwarded-Proto"
+ HEADER_TOKEN = "token"
+ HEADER_BEARER = "BEARER"
+ HEADER_AUTH = "Authorization"
+ HEADER_REQUESTED_WITH = "X-Requested-With"
+ HEADER_REQUESTED_WITH_XML = "XMLHttpRequest"
+ STATUS = "status"
+ STATUS_OK = "OK"
+ STATUS_FAIL = "FAIL"
+ STATUS_REMOVE = "REMOVE"
+
+ CLIENT_DIR = "client"
+
+ API_URL_SUFFIX_V1 = "/api/v1"
+ API_URL_SUFFIX_V3 = "/api/v3"
+ API_URL_SUFFIX_V4 = "/api/v4"
+ API_URL_SUFFIX = API_URL_SUFFIX_V4
+)
+
+type Result struct {
+ RequestId string
+ Etag string
+ Data interface{}
+}
+
+type ResponseMetadata struct {
+ StatusCode int
+ Error *AppError
+ RequestId string
+ Etag string
+}
+
+type Client struct {
+ Url string // The location of the server like "http://localhost:8065"
+ ApiUrl string // The api location of the server like "http://localhost:8065/api/v3"
+ HttpClient *http.Client // The http client
+ AuthToken string
+ AuthType string
+ TeamId string
+ RequestId string
+ Etag string
+ ServerVersion string
+}
+
+// NewClient constructs a new client with convenience methods for talking to
+// the server.
+func NewClient(url string) *Client {
+ return &Client{url, url + API_URL_SUFFIX_V3, &http.Client{}, "", "", "", "", "", ""}
+}
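+
+// A minimal usage sketch (the URL and credentials below are placeholders;
+// Login stores the returned auth token on the client for subsequent calls):
+//
+//	client := NewClient("http://localhost:8065")
+//	if result, err := client.Login("user@example.com", "password"); err == nil {
+//		user := result.Data.(*User)
+//		_ = user
+//	}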
+
+func closeBody(r *http.Response) {
+ if r.Body != nil {
+ ioutil.ReadAll(r.Body)
+ r.Body.Close()
+ }
+}
+
+func (c *Client) SetOAuthToken(token string) {
+ c.AuthToken = token
+ c.AuthType = HEADER_TOKEN
+}
+
+func (c *Client) ClearOAuthToken() {
+ c.AuthToken = ""
+ c.AuthType = HEADER_BEARER
+}
+
+func (c *Client) SetTeamId(teamId string) {
+ c.TeamId = teamId
+}
+
+func (c *Client) GetTeamId() string {
+ if len(c.TeamId) == 0 {
+ println(`You are trying to use a route that requires a team_id,
+ but you have not called SetTeamId() in client.go`)
+ }
+
+ return c.TeamId
+}
+
+func (c *Client) ClearTeamId() {
+ c.TeamId = ""
+}
+
+func (c *Client) GetTeamRoute() string {
+ return fmt.Sprintf("/teams/%v", c.GetTeamId())
+}
+
+func (c *Client) GetChannelRoute(channelId string) string {
+ return fmt.Sprintf("/teams/%v/channels/%v", c.GetTeamId(), channelId)
+}
+
+func (c *Client) GetUserRequiredRoute(userId string) string {
+ return fmt.Sprintf("/users/%v", userId)
+}
+
+func (c *Client) GetChannelNameRoute(channelName string) string {
+ return fmt.Sprintf("/teams/%v/channels/name/%v", c.GetTeamId(), channelName)
+}
+
+func (c *Client) GetEmojiRoute() string {
+ return "/emoji"
+}
+
+func (c *Client) GetGeneralRoute() string {
+ return "/general"
+}
+
+func (c *Client) GetFileRoute(fileId string) string {
+ return fmt.Sprintf("/files/%v", fileId)
+}
+
+func (c *Client) DoPost(url, data, contentType string) (*http.Response, *AppError) {
+ rq, _ := http.NewRequest("POST", c.Url+url, strings.NewReader(data))
+ rq.Header.Set("Content-Type", contentType)
+ rq.Close = true
+
+ if rp, err := c.HttpClient.Do(rq); err != nil {
+ return nil, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0)
+ } else if rp.StatusCode >= 300 {
+ defer closeBody(rp)
+ return nil, AppErrorFromJson(rp.Body)
+ } else {
+ return rp, nil
+ }
+}
+
+func (c *Client) DoApiPost(url string, data string) (*http.Response, *AppError) {
+ rq, _ := http.NewRequest("POST", c.ApiUrl+url, strings.NewReader(data))
+ rq.Close = true
+
+ if len(c.AuthToken) > 0 {
+ rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken)
+ }
+
+ if rp, err := c.HttpClient.Do(rq); err != nil {
+ return nil, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0)
+ } else if rp.StatusCode >= 300 {
+ defer closeBody(rp)
+ return nil, AppErrorFromJson(rp.Body)
+ } else {
+ return rp, nil
+ }
+}
+
+func (c *Client) DoApiGet(url string, data string, etag string) (*http.Response, *AppError) {
+ rq, _ := http.NewRequest("GET", c.ApiUrl+url, strings.NewReader(data))
+ rq.Close = true
+
+ if len(etag) > 0 {
+ rq.Header.Set(HEADER_ETAG_CLIENT, etag)
+ }
+
+ if len(c.AuthToken) > 0 {
+ rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken)
+ }
+
+ if rp, err := c.HttpClient.Do(rq); err != nil {
+ return nil, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0)
+ } else if rp.StatusCode == 304 {
+ return rp, nil
+ } else if rp.StatusCode >= 300 {
+ defer closeBody(rp)
+ return rp, AppErrorFromJson(rp.Body)
+ } else {
+ return rp, nil
+ }
+}
+
+func getCookie(name string, resp *http.Response) *http.Cookie {
+ for _, cookie := range resp.Cookies() {
+ if cookie.Name == name {
+ return cookie
+ }
+ }
+
+ return nil
+}
+
+// Must is a convenience function used for testing.
+func (c *Client) Must(result *Result, err *AppError) *Result {
+ if err != nil {
+ l4g.Close()
+ time.Sleep(time.Second)
+ panic(err)
+ }
+
+ return result
+}
+
+// MustGeneric is a convenience function used for testing.
+func (c *Client) MustGeneric(result interface{}, err *AppError) interface{} {
+ if err != nil {
+ l4g.Close()
+ time.Sleep(time.Second)
+ panic(err)
+ }
+
+ return result
+}
+
+// CheckStatusOK is a convenience function for checking the return of a Web Service
+// call that returns a map of status=OK.
+func (c *Client) CheckStatusOK(r *http.Response) bool {
+ m := MapFromJson(r.Body)
+ defer closeBody(r)
+
+ if m != nil && m[STATUS] == STATUS_OK {
+ return true
+ }
+
+ return false
+}
+
+func (c *Client) fillInExtraProperties(r *http.Response) {
+ c.RequestId = r.Header.Get(HEADER_REQUEST_ID)
+ c.Etag = r.Header.Get(HEADER_ETAG_SERVER)
+ c.ServerVersion = r.Header.Get(HEADER_VERSION_ID)
+}
+
+func (c *Client) clearExtraProperties() {
+ c.RequestId = ""
+ c.Etag = ""
+ c.ServerVersion = ""
+}
+
+// General Routes Section
+
+// GetClientProperties returns properties needed by the client to show/hide
+// certain features. It returns a map of strings.
+func (c *Client) GetClientProperties() (map[string]string, *AppError) {
+ c.clearExtraProperties()
+ if r, err := c.DoApiGet(c.GetGeneralRoute()+"/client_props", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ c.fillInExtraProperties(r)
+ return MapFromJson(r.Body), nil
+ }
+}
+
+// LogClient is a convenience Web Service call so clients can log messages into
+// the server-side logs. For example, we typically log javascript error messages
+// this way. It returns true if the logging was successful.
+func (c *Client) LogClient(message string) (bool, *AppError) {
+ c.clearExtraProperties()
+ m := make(map[string]string)
+ m["level"] = "ERROR"
+ m["message"] = message
+
+ if r, err := c.DoApiPost(c.GetGeneralRoute()+"/log_client", MapToJson(m)); err != nil {
+ return false, err
+ } else {
+ defer closeBody(r)
+ c.fillInExtraProperties(r)
+ return c.CheckStatusOK(r), nil
+ }
+}
+
+// GetPing returns a map of strings with server time, server version, and node Id.
+// Systems that want to check on health status of the server should check the
+// url /api/v3/ping for a 200 status response.
+func (c *Client) GetPing() (map[string]string, *AppError) {
+ c.clearExtraProperties()
+ if r, err := c.DoApiGet(c.GetGeneralRoute()+"/ping", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ c.fillInExtraProperties(r)
+ return MapFromJson(r.Body), nil
+ }
+}
+
+// Team Routes Section
+
+// CreateTeam creates a team based on the provided Team struct. On success it returns
+// the Team struct with the Id, CreateAt and other server-decided fields populated.
+func (c *Client) CreateTeam(team *Team) (*Result, *AppError) {
+ if r, err := c.DoApiPost("/teams/create", team.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), TeamFromJson(r.Body)}, nil
+ }
+}
+
+// GetAllTeams returns a map of all teams using team ids as the key.
+func (c *Client) GetAllTeams() (*Result, *AppError) {
+ if r, err := c.DoApiGet("/teams/all", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), TeamMapFromJson(r.Body)}, nil
+ }
+}
+
+// GetAllTeamListings returns a map of all teams that are available to join
+// using team ids as the key. Must be authenticated.
+func (c *Client) GetAllTeamListings() (*Result, *AppError) {
+ if r, err := c.DoApiGet("/teams/all_team_listings", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), TeamMapFromJson(r.Body)}, nil
+ }
+}
+
+// FindTeamByName returns true or false depending on whether a team
+// with the provided name was found.
+func (c *Client) FindTeamByName(name string) (*Result, *AppError) {
+ m := make(map[string]string)
+ m["name"] = name
+ if r, err := c.DoApiPost("/teams/find_team_by_name", MapToJson(m)); err != nil {
+ return nil, err
+ } else {
+ val := false
+ if body, _ := ioutil.ReadAll(r.Body); string(body) == "true" {
+ val = true
+ }
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), val}, nil
+ }
+}
+
+// AddUserToTeam adds a user directly to the team without sending an invite.
+// The teamId and userId are required. You must be a valid member of the team and/or
+// have the correct role to add new users to the team. Returns a map of user_id=userId
+// if successful, otherwise returns an AppError.
+func (c *Client) AddUserToTeam(teamId string, userId string) (*Result, *AppError) {
+ if len(teamId) == 0 {
+ teamId = c.GetTeamId()
+ }
+
+ data := make(map[string]string)
+ data["user_id"] = userId
+ if r, err := c.DoApiPost(fmt.Sprintf("/teams/%v", teamId)+"/add_user_to_team", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+// AddUserToTeamFromInvite adds a user to a team based on data provided in an invite link.
+// Either both hash and dataToHash or inviteId is required.
+func (c *Client) AddUserToTeamFromInvite(hash, dataToHash, inviteId string) (*Result, *AppError) {
+ data := make(map[string]string)
+ data["hash"] = hash
+ data["data"] = dataToHash
+ data["invite_id"] = inviteId
+ if r, err := c.DoApiPost("/teams/add_user_to_team_from_invite", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), TeamFromJson(r.Body)}, nil
+ }
+}
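+
+// For example, joining via an invite id alone (hash and dataToHash left
+// blank; the id below is a placeholder):
+//
+//	result, err := client.AddUserToTeamFromInvite("", "", "someinviteid")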
+
+// RemoveUserFromTeam removes a user directly from the team.
+// The teamId and userId are required. You must be a valid member of the team and/or
+// have the correct role to remove a user from the team. Returns a map of user_id=userId
+// if successful, otherwise returns an AppError.
+func (c *Client) RemoveUserFromTeam(teamId string, userId string) (*Result, *AppError) {
+ if len(teamId) == 0 {
+ teamId = c.GetTeamId()
+ }
+
+ data := make(map[string]string)
+ data["user_id"] = userId
+ if r, err := c.DoApiPost(fmt.Sprintf("/teams/%v", teamId)+"/remove_user_from_team", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) InviteMembers(invites *Invites) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/invite_members", invites.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), InvitesFromJson(r.Body)}, nil
+ }
+}
+
+// UpdateTeam updates a team based on the changes in the provided team struct. On success
+// it returns a sanitized version of the updated team. Must be authenticated as a team admin
+// for that team or a system admin.
+func (c *Client) UpdateTeam(team *Team) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/update", team.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), TeamFromJson(r.Body)}, nil
+ }
+}
+
+// User Routes Section
+
+// CreateUser creates a user in the system based on the provided user struct.
+func (c *Client) CreateUser(user *User, hash string) (*Result, *AppError) {
+ if r, err := c.DoApiPost("/users/create", user.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil
+ }
+}
+
+// CreateUserWithInvite creates a user based on the provided user struct. Either the hash and
+// data strings or the inviteId from the invite is required.
+func (c *Client) CreateUserWithInvite(user *User, hash string, data string, inviteId string) (*Result, *AppError) {
+
+ url := "/users/create?d=" + url.QueryEscape(data) + "&h=" + url.QueryEscape(hash) + "&iid=" + url.QueryEscape(inviteId)
+
+ if r, err := c.DoApiPost(url, user.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) CreateUserFromSignup(user *User, data string, hash string) (*Result, *AppError) {
+ if r, err := c.DoApiPost("/users/create?d="+url.QueryEscape(data)+"&h="+hash, user.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil
+ }
+}
+
+// GetUser returns a user based on a provided user id string. Must be authenticated.
+func (c *Client) GetUser(id string, etag string) (*Result, *AppError) {
+ if r, err := c.DoApiGet("/users/"+id+"/get", "", etag); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil
+ }
+}
+
+// GetByUsername returns a user based on a provided username string. Must be authenticated.
+func (c *Client) GetByUsername(username string, etag string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(fmt.Sprintf("/users/name/%v", username), "", etag); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil
+ }
+}
+
+// GetByEmail returns a user based on a provided email string. Must be authenticated.
+func (c *Client) GetByEmail(email string, etag string) (*User, *ResponseMetadata) {
+ if r, err := c.DoApiGet(fmt.Sprintf("/users/email/%v", email), "", etag); err != nil {
+ return nil, &ResponseMetadata{StatusCode: r.StatusCode, Error: err}
+ } else {
+ defer closeBody(r)
+ return UserFromJson(r.Body),
+ &ResponseMetadata{
+ StatusCode: r.StatusCode,
+ RequestId: r.Header.Get(HEADER_REQUEST_ID),
+ Etag: r.Header.Get(HEADER_ETAG_SERVER),
+ }
+ }
+}
+
+// GetMe returns the current user.
+func (c *Client) GetMe(etag string) (*Result, *AppError) {
+ if r, err := c.DoApiGet("/users/me", "", etag); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil
+ }
+}
+
+// GetProfiles returns a map of users using user id as the key. Must be authenticated.
+func (c *Client) GetProfiles(offset int, limit int, etag string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(fmt.Sprintf("/users/%v/%v", offset, limit), "", etag); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserMapFromJson(r.Body)}, nil
+ }
+}
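+
+// A paging sketch (offset and limit are illustrative): fetch the first
+// hundred profiles, keyed by user id.
+//
+//	if result, err := client.GetProfiles(0, 100, ""); err == nil {
+//		users := result.Data.(map[string]*User)
+//		_ = users
+//	}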
+
+// GetProfilesInTeam returns a map of users for a team using user id as the key. Must
+// be authenticated.
+func (c *Client) GetProfilesInTeam(teamId string, offset int, limit int, etag string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(fmt.Sprintf("/teams/%v/users/%v/%v", teamId, offset, limit), "", etag); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserMapFromJson(r.Body)}, nil
+ }
+}
+
+// GetProfilesInChannel returns a map of users for a channel using user id as the key. Must
+// be authenticated.
+func (c *Client) GetProfilesInChannel(channelId string, offset int, limit int, etag string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(fmt.Sprintf(c.GetChannelRoute(channelId)+"/users/%v/%v", offset, limit), "", etag); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserMapFromJson(r.Body)}, nil
+ }
+}
+
+// GetProfilesNotInChannel returns a map of users not in a channel but on the team using user id as the key. Must
+// be authenticated.
+func (c *Client) GetProfilesNotInChannel(channelId string, offset int, limit int, etag string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(fmt.Sprintf(c.GetChannelRoute(channelId)+"/users/not_in_channel/%v/%v", offset, limit), "", etag); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserMapFromJson(r.Body)}, nil
+ }
+}
+
+// GetProfilesByIds returns a map of users based on the user ids provided. Must
+// be authenticated.
+func (c *Client) GetProfilesByIds(userIds []string) (*Result, *AppError) {
+ if r, err := c.DoApiPost("/users/ids", ArrayToJson(userIds)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserMapFromJson(r.Body)}, nil
+ }
+}
+
+// SearchUsers returns a list of users that have a username matching or similar to the search term. Must
+// be authenticated.
+func (c *Client) SearchUsers(params UserSearch) (*Result, *AppError) {
+ if r, err := c.DoApiPost("/users/search", params.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserListFromJson(r.Body)}, nil
+ }
+}
+
+// AutocompleteUsersInChannel returns two lists for autocompletion of users in a channel. The first list, "in_channel",
+// specifies users in the channel. The second list, "out_of_channel", specifies users outside of the
+// channel. Term, the string to search against, is required, as is the channel id. Must be authenticated.
+func (c *Client) AutocompleteUsersInChannel(term string, channelId string) (*Result, *AppError) {
+ url := fmt.Sprintf("%s/users/autocomplete?term=%s", c.GetChannelRoute(channelId), url.QueryEscape(term))
+ if r, err := c.DoApiGet(url, "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserAutocompleteInChannelFromJson(r.Body)}, nil
+ }
+}
+
+// AutocompleteUsersInTeam returns a list for autocompletion of users in a team. The list "in_team" specifies
+// the users in the team that match the provided term, matching against username, full name and
+// nickname. Must be authenticated.
+func (c *Client) AutocompleteUsersInTeam(term string) (*Result, *AppError) {
+ url := fmt.Sprintf("%s/users/autocomplete?term=%s", c.GetTeamRoute(), url.QueryEscape(term))
+ if r, err := c.DoApiGet(url, "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserAutocompleteInTeamFromJson(r.Body)}, nil
+ }
+}
+
+// AutocompleteUsers returns a list for autocompletion of users on the system that match the provided term,
+// matching against username, full name and nickname. Must be authenticated.
+func (c *Client) AutocompleteUsers(term string) (*Result, *AppError) {
+ url := fmt.Sprintf("/users/autocomplete?term=%s", url.QueryEscape(term))
+ if r, err := c.DoApiGet(url, "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserListFromJson(r.Body)}, nil
+ }
+}
+
+// LoginById authenticates a user by user id and password.
+func (c *Client) LoginById(id string, password string) (*Result, *AppError) {
+ m := make(map[string]string)
+ m["id"] = id
+ m["password"] = password
+ return c.login(m)
+}
+
+// Login authenticates a user by login id, which can be username, email or some sort
+// of SSO identifier based on configuration, and a password.
+func (c *Client) Login(loginId string, password string) (*Result, *AppError) {
+ m := make(map[string]string)
+ m["login_id"] = loginId
+ m["password"] = password
+ return c.login(m)
+}
+
+// LoginByLdap authenticates a user by LDAP id and password.
+func (c *Client) LoginByLdap(loginId string, password string) (*Result, *AppError) {
+ m := make(map[string]string)
+ m["login_id"] = loginId
+ m["password"] = password
+ m["ldap_only"] = "true"
+ return c.login(m)
+}
+
+// LoginWithDevice authenticates a user by login id (username, email or some sort
+// of SSO identifier based on configuration), password and attaches a device id to
+// the session.
+func (c *Client) LoginWithDevice(loginId string, password string, deviceId string) (*Result, *AppError) {
+ m := make(map[string]string)
+ m["login_id"] = loginId
+ m["password"] = password
+ m["device_id"] = deviceId
+ return c.login(m)
+}
+
+func (c *Client) login(m map[string]string) (*Result, *AppError) {
+ if r, err := c.DoApiPost("/users/login", MapToJson(m)); err != nil {
+ return nil, err
+ } else {
+ c.AuthToken = r.Header.Get(HEADER_TOKEN)
+ c.AuthType = HEADER_BEARER
+ sessionToken := getCookie(SESSION_COOKIE_TOKEN, r)
+
+ if c.AuthToken != sessionToken.Value {
+ NewAppError("/users/login", "model.client.login.app_error", nil, "", 0)
+ }
+
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil
+ }
+}
+
+// Logout terminates the current user's session.
+func (c *Client) Logout() (*Result, *AppError) {
+ if r, err := c.DoApiPost("/users/logout", ""); err != nil {
+ return nil, err
+ } else {
+ c.AuthToken = ""
+ c.AuthType = HEADER_BEARER
+ c.TeamId = ""
+
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+// CheckMfa returns a map with key "mfa_required" with the string value "true" or "false",
+// indicating whether MFA is required to log the user in, based on a provided login id
+// (username, email or some sort of SSO identifier based on configuration).
+func (c *Client) CheckMfa(loginId string) (*Result, *AppError) {
+ m := make(map[string]string)
+ m["login_id"] = loginId
+
+ if r, err := c.DoApiPost("/users/mfa", MapToJson(m)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+// GenerateMfaSecret returns a QR code image containing the secret, to be scanned
+// by a multi-factor authentication mobile application. It also returns the secret
+// for manual entry. Must be authenticated.
+func (c *Client) GenerateMfaSecret() (*Result, *AppError) {
+ if r, err := c.DoApiGet("/users/generate_mfa_secret", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+// UpdateMfa activates multi-factor authentication for the current user if activate
+// is true and a valid token is provided. If activate is false, then the token is not
+// required and multi-factor authentication is disabled for the current user.
+func (c *Client) UpdateMfa(activate bool, token string) (*Result, *AppError) {
+ m := make(map[string]interface{})
+ m["activate"] = activate
+ m["token"] = token
+
+ if r, err := c.DoApiPost("/users/update_mfa", StringInterfaceToJson(m)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
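+
+// A typical activation flow (sketch; the one-time token comes from the user's
+// authenticator app after scanning the generated QR code, and the exact keys of
+// the returned map are not shown here):
+//
+//	if result, err := client.GenerateMfaSecret(); err == nil {
+//		secret := result.Data.(map[string]string)
+//		_ = secret // contains the QR code image and the secret for manual entry
+//	}
+//	// later, once the user supplies a token:
+//	client.UpdateMfa(true, "123456")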
+
+func (c *Client) AdminResetMfa(userId string) (*Result, *AppError) {
+ m := make(map[string]string)
+ m["user_id"] = userId
+
+ if r, err := c.DoApiPost("/admin/reset_mfa", MapToJson(m)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) RevokeSession(sessionAltId string) (*Result, *AppError) {
+ m := make(map[string]string)
+ m["id"] = sessionAltId
+
+ if r, err := c.DoApiPost("/users/revoke_session", MapToJson(m)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetSessions(id string) (*Result, *AppError) {
+ if r, err := c.DoApiGet("/users/"+id+"/sessions", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), SessionsFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) EmailToOAuth(m map[string]string) (*Result, *AppError) {
+ if r, err := c.DoApiPost("/users/claim/email_to_oauth", MapToJson(m)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) OAuthToEmail(m map[string]string) (*Result, *AppError) {
+ if r, err := c.DoApiPost("/users/claim/oauth_to_email", MapToJson(m)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) LDAPToEmail(m map[string]string) (*Result, *AppError) {
+ if r, err := c.DoApiPost("/users/claim/ldap_to_email", MapToJson(m)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) EmailToLDAP(m map[string]string) (*Result, *AppError) {
+	if r, err := c.DoApiPost("/users/claim/email_to_ldap", MapToJson(m)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) Command(channelId string, command string) (*Result, *AppError) {
+ args := &CommandArgs{ChannelId: channelId, Command: command}
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/commands/execute", args.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), CommandResponseFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) ListCommands() (*Result, *AppError) {
+ if r, err := c.DoApiGet(c.GetTeamRoute()+"/commands/list", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), CommandListFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) ListTeamCommands() (*Result, *AppError) {
+ if r, err := c.DoApiGet(c.GetTeamRoute()+"/commands/list_team_commands", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), CommandListFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) CreateCommand(cmd *Command) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/commands/create", cmd.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), CommandFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) UpdateCommand(cmd *Command) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/commands/update", cmd.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), CommandFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) RegenCommandToken(data map[string]string) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/commands/regen_token", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), CommandFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) DeleteCommand(data map[string]string) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/commands/delete", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetAudits(id string, etag string) (*Result, *AppError) {
+ if r, err := c.DoApiGet("/users/"+id+"/audits", "", etag); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), AuditsFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetLogs() (*Result, *AppError) {
+ if r, err := c.DoApiGet("/admin/logs", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ArrayFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetClusterStatus() ([]*ClusterInfo, *AppError) {
+ if r, err := c.DoApiGet("/admin/cluster_status", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return ClusterInfosFromJson(r.Body), nil
+ }
+}
+
+// GetRecentlyActiveUsers returns a map of users including lastActivityAt using user id as the key
+func (c *Client) GetRecentlyActiveUsers(teamId string) (*Result, *AppError) {
+ if r, err := c.DoApiGet("/admin/recently_active_users/"+teamId, "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserMapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetAllAudits() (*Result, *AppError) {
+ if r, err := c.DoApiGet("/admin/audits", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), AuditsFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetConfig() (*Result, *AppError) {
+ if r, err := c.DoApiGet("/admin/config", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ConfigFromJson(r.Body)}, nil
+ }
+}
+
+// ReloadConfig will reload the config.json file from disk. Properties
+// requiring a server restart will still need a server restart. You must
+// have the system admin role to call this method. It will return status=OK
+// if it's successfully reloaded the config file, otherwise check the returned error.
+func (c *Client) ReloadConfig() (bool, *AppError) {
+ c.clearExtraProperties()
+ if r, err := c.DoApiGet("/admin/reload_config", "", ""); err != nil {
+ return false, err
+ } else {
+ c.fillInExtraProperties(r)
+ return c.CheckStatusOK(r), nil
+ }
+}
+
+func (c *Client) InvalidateAllCaches() (bool, *AppError) {
+ c.clearExtraProperties()
+ if r, err := c.DoApiGet("/admin/invalidate_all_caches", "", ""); err != nil {
+ return false, err
+ } else {
+ c.fillInExtraProperties(r)
+ return c.CheckStatusOK(r), nil
+ }
+}
+
+func (c *Client) SaveConfig(config *Config) (*Result, *AppError) {
+ if r, err := c.DoApiPost("/admin/save_config", config.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+// RecycleDatabaseConnection will attempt to recycle the database connections.
+// You must have the system admin role to call this method. It will return status=OK
+// if it's successfully recycled the connections, otherwise check the returned error.
+func (c *Client) RecycleDatabaseConnection() (bool, *AppError) {
+ c.clearExtraProperties()
+ if r, err := c.DoApiGet("/admin/recycle_db_conn", "", ""); err != nil {
+ return false, err
+ } else {
+ c.fillInExtraProperties(r)
+ return c.CheckStatusOK(r), nil
+ }
+}
+
+func (c *Client) TestEmail(config *Config) (*Result, *AppError) {
+ if r, err := c.DoApiPost("/admin/test_email", config.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+// TestLdap will run a connection test on the current LDAP settings.
+// It will return the standard OK response if settings work. Otherwise
+// it will return an appropriate error.
+func (c *Client) TestLdap(config *Config) (*Result, *AppError) {
+ if r, err := c.DoApiPost("/admin/ldap_test", config.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetComplianceReports() (*Result, *AppError) {
+ if r, err := c.DoApiGet("/admin/compliance_reports", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), CompliancesFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) SaveComplianceReport(job *Compliance) (*Result, *AppError) {
+ if r, err := c.DoApiPost("/admin/save_compliance_report", job.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ComplianceFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) DownloadComplianceReport(id string) (*Result, *AppError) {
+ var rq *http.Request
+ rq, _ = http.NewRequest("GET", c.ApiUrl+"/admin/download_compliance_report/"+id, nil)
+ rq.Close = true
+
+ if len(c.AuthToken) > 0 {
+ rq.Header.Set(HEADER_AUTH, "BEARER "+c.AuthToken)
+ }
+
+ if rp, err := c.HttpClient.Do(rq); err != nil {
+ return nil, NewAppError("/admin/download_compliance_report", "model.client.connecting.app_error", nil, err.Error(), 0)
+ } else if rp.StatusCode >= 300 {
+ defer rp.Body.Close()
+ return nil, AppErrorFromJson(rp.Body)
+ } else {
+ defer closeBody(rp)
+ return &Result{rp.Header.Get(HEADER_REQUEST_ID),
+ rp.Header.Get(HEADER_ETAG_SERVER), rp.Body}, nil
+ }
+}
+
+func (c *Client) GetTeamAnalytics(teamId, name string) (*Result, *AppError) {
+ if r, err := c.DoApiGet("/admin/analytics/"+teamId+"/"+name, "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), AnalyticsRowsFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetSystemAnalytics(name string) (*Result, *AppError) {
+ if r, err := c.DoApiGet("/admin/analytics/"+name, "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), AnalyticsRowsFromJson(r.Body)}, nil
+ }
+}
+
+// LdapSyncNow initiates an immediate synchronization of LDAP users.
+// The synchronization will be performed asynchronously and this function will
+// always return OK unless you don't have permissions.
+// You must be the system administrator to use this function.
+func (c *Client) LdapSyncNow() (*Result, *AppError) {
+ if r, err := c.DoApiPost("/admin/ldap_sync_now", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) CreateChannel(channel *Channel) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/channels/create", channel.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ChannelFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) CreateDirectChannel(userId string) (*Result, *AppError) {
+ data := make(map[string]string)
+ data["user_id"] = userId
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/channels/create_direct", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ChannelFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) CreateGroupChannel(userIds []string) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/channels/create_group", ArrayToJson(userIds)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ChannelFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) UpdateChannel(channel *Channel) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/channels/update", channel.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ChannelFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) UpdateChannelHeader(data map[string]string) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/channels/update_header", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ChannelFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) UpdateChannelPurpose(data map[string]string) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/channels/update_purpose", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ChannelFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) UpdateNotifyProps(data map[string]string) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/channels/update_notify_props", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetMyChannelMembers() (*Result, *AppError) {
+ if r, err := c.DoApiGet(c.GetTeamRoute()+"/channels/members", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ChannelMembersFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetChannel(id, etag string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(c.GetChannelRoute(id)+"/", "", etag); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ChannelDataFromJson(r.Body)}, nil
+ }
+}
+
+// GetMoreChannelsPage will return a page of open channels the user is not in based on
+// the provided offset and limit. Must be authenticated.
+func (c *Client) GetMoreChannelsPage(offset int, limit int) (*Result, *AppError) {
+ if r, err := c.DoApiGet(fmt.Sprintf(c.GetTeamRoute()+"/channels/more/%v/%v", offset, limit), "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ChannelListFromJson(r.Body)}, nil
+ }
+}
+
+// SearchMoreChannels will return a list of open channels the user is not in that match
+// the search criteria provided. Must be authenticated.
+func (c *Client) SearchMoreChannels(channelSearch ChannelSearch) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/channels/more/search", channelSearch.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ChannelListFromJson(r.Body)}, nil
+ }
+}
+
+// AutocompleteChannels will return a list of open channels that match the provided
+// string. Must be authenticated.
+func (c *Client) AutocompleteChannels(term string) (*Result, *AppError) {
+ url := fmt.Sprintf("%s/channels/autocomplete?term=%s", c.GetTeamRoute(), url.QueryEscape(term))
+ if r, err := c.DoApiGet(url, "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ChannelListFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetChannelCounts(etag string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(c.GetTeamRoute()+"/channels/counts", "", etag); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ChannelCountsFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetChannels(etag string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(c.GetTeamRoute()+"/channels/", "", etag); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ChannelListFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetChannelByName(channelName string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(c.GetChannelNameRoute(channelName), "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ChannelFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) JoinChannel(id string) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetChannelRoute(id)+"/join", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), nil}, nil
+ }
+}
+
+func (c *Client) JoinChannelByName(name string) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetChannelNameRoute(name)+"/join", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), nil}, nil
+ }
+}
+
+func (c *Client) LeaveChannel(id string) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetChannelRoute(id)+"/leave", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), nil}, nil
+ }
+}
+
+func (c *Client) DeleteChannel(id string) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetChannelRoute(id)+"/delete", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), nil}, nil
+ }
+}
+
+func (c *Client) AddChannelMember(id, user_id string) (*Result, *AppError) {
+ data := make(map[string]string)
+ data["user_id"] = user_id
+ if r, err := c.DoApiPost(c.GetChannelRoute(id)+"/add", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), nil}, nil
+ }
+}
+
+func (c *Client) RemoveChannelMember(id, user_id string) (*Result, *AppError) {
+ data := make(map[string]string)
+ data["user_id"] = user_id
+ if r, err := c.DoApiPost(c.GetChannelRoute(id)+"/remove", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), nil}, nil
+ }
+}
+
+// ViewChannel performs all the actions related to viewing a channel. This includes marking
+// the channel and the previous one as read, and marking the channel as being actively viewed.
+// ChannelId is required but may be blank to indicate no channel is being viewed.
+// PrevChannelId is optional; populate it to indicate a channel switch occurred.
+func (c *Client) ViewChannel(params ChannelView) (bool, *ResponseMetadata) {
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/channels/view", params.ToJson()); err != nil {
+ return false, &ResponseMetadata{StatusCode: r.StatusCode, Error: err}
+ } else {
+ return c.CheckStatusOK(r),
+ &ResponseMetadata{
+ StatusCode: r.StatusCode,
+ RequestId: r.Header.Get(HEADER_REQUEST_ID),
+ Etag: r.Header.Get(HEADER_ETAG_SERVER),
+ }
+ }
+}
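+
+// For example, recording a switch from one channel to another (both ids are
+// placeholders):
+//
+//	ok, metadata := client.ViewChannel(ChannelView{
+//		ChannelId:     "currentchannelid",
+//		PrevChannelId: "previouschannelid",
+//	})
+//	_, _ = ok, metadata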
+
+func (c *Client) GetChannelStats(id string, etag string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(c.GetChannelRoute(id)+"/stats", "", etag); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ChannelStatsFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetChannelMember(channelId string, userId string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/members/"+userId, "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ChannelMemberFromJson(r.Body)}, nil
+ }
+}
+
+// GetChannelMembersByIds will return channel member objects as an array based on the
+// channel id and a list of user ids provided. Must be authenticated.
+func (c *Client) GetChannelMembersByIds(channelId string, userIds []string) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetChannelRoute(channelId)+"/members/ids", ArrayToJson(userIds)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), ChannelMembersFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) CreatePost(post *Post) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetChannelRoute(post.ChannelId)+"/posts/create", post.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), PostFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) UpdatePost(post *Post) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetChannelRoute(post.ChannelId)+"/posts/update", post.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), PostFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetPosts(channelId string, offset int, limit int, etag string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+fmt.Sprintf("/posts/page/%v/%v", offset, limit), "", etag); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil
+ }
+}
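+
+// A paging sketch (the channel id is a placeholder): fetch the sixty most
+// recent posts in a channel.
+//
+//	if result, err := client.GetPosts("channelid", 0, 60, ""); err == nil {
+//		posts := result.Data.(*PostList)
+//		_ = posts
+//	}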
+
+func (c *Client) GetPostsSince(channelId string, time int64) (*Result, *AppError) {
+ if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+fmt.Sprintf("/posts/since/%v", time), "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetPostsBefore(channelId string, postid string, offset int, limit int, etag string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+fmt.Sprintf("/posts/%v/before/%v/%v", postid, offset, limit), "", etag); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetPostsAfter(channelId string, postid string, offset int, limit int, etag string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(fmt.Sprintf(c.GetChannelRoute(channelId)+"/posts/%v/after/%v/%v", postid, offset, limit), "", etag); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetPost(channelId string, postId string, etag string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+fmt.Sprintf("/posts/%v/get", postId), "", etag); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil
+ }
+}
+
+// GetPostById returns a post and any posts in the same thread by post id
+func (c *Client) GetPostById(postId string, etag string) (*PostList, *ResponseMetadata) {
+ if r, err := c.DoApiGet(c.GetTeamRoute()+fmt.Sprintf("/posts/%v", postId), "", etag); err != nil {
+ return nil, &ResponseMetadata{StatusCode: r.StatusCode, Error: err}
+ } else {
+ defer closeBody(r)
+ return PostListFromJson(r.Body),
+ &ResponseMetadata{
+ StatusCode: r.StatusCode,
+ RequestId: r.Header.Get(HEADER_REQUEST_ID),
+ Etag: r.Header.Get(HEADER_ETAG_SERVER),
+ }
+ }
+}
+
+// GetPermalink returns a post list, based on the provided channel and post ID.
+func (c *Client) GetPermalink(channelId string, postId string, etag string) (*PostList, *ResponseMetadata) {
+ if r, err := c.DoApiGet(c.GetTeamRoute()+fmt.Sprintf("/pltmp/%v", postId), "", etag); err != nil {
+ return nil, &ResponseMetadata{StatusCode: r.StatusCode, Error: err}
+ } else {
+ defer closeBody(r)
+ return PostListFromJson(r.Body),
+ &ResponseMetadata{
+ StatusCode: r.StatusCode,
+ RequestId: r.Header.Get(HEADER_REQUEST_ID),
+ Etag: r.Header.Get(HEADER_ETAG_SERVER),
+ }
+ }
+}
+
+func (c *Client) DeletePost(channelId string, postId string) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetChannelRoute(channelId)+fmt.Sprintf("/posts/%v/delete", postId), ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) SearchPosts(terms string, isOrSearch bool) (*Result, *AppError) {
+ data := map[string]interface{}{}
+ data["terms"] = terms
+ data["is_or_search"] = isOrSearch
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/posts/search", StringInterfaceToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil
+ }
+}
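+
+// For example, an OR search across two terms (illustrative only):
+//
+//	result, err := client.SearchPosts("deploy rollback", true)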
+
+// GetFlaggedPosts will return a post list of posts that have been flagged by the user.
+// The page is set by the integer parameters offset and limit.
+func (c *Client) GetFlaggedPosts(offset int, limit int) (*Result, *AppError) {
+ if r, err := c.DoApiGet(c.GetTeamRoute()+fmt.Sprintf("/posts/flagged/%v/%v", offset, limit), "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetPinnedPosts(channelId string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/pinned", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) UploadProfileFile(data []byte, contentType string) (*Result, *AppError) {
+ return c.uploadFile(c.ApiUrl+"/users/newimage", data, contentType)
+}
+
+func (c *Client) UploadPostAttachment(data []byte, channelId string, filename string) (*FileUploadResponse, *AppError) {
+ c.clearExtraProperties()
+
+ body := &bytes.Buffer{}
+ writer := multipart.NewWriter(body)
+
+ if part, err := writer.CreateFormFile("files", filename); err != nil {
+ return nil, NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.file.app_error", nil, err.Error(), 0)
+ } else if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil {
+ return nil, NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.file.app_error", nil, err.Error(), 0)
+ }
+
+ if part, err := writer.CreateFormField("channel_id"); err != nil {
+ return nil, NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.channel_id.app_error", nil, err.Error(), 0)
+ } else if _, err = io.Copy(part, strings.NewReader(channelId)); err != nil {
+ return nil, NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.channel_id.app_error", nil, err.Error(), 0)
+ }
+
+ if err := writer.Close(); err != nil {
+ return nil, NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.writer.app_error", nil, err.Error(), 0)
+ }
+
+ if result, err := c.uploadFile(c.ApiUrl+c.GetTeamRoute()+"/files/upload", body.Bytes(), writer.FormDataContentType()); err != nil {
+ return nil, err
+ } else {
+ return result.Data.(*FileUploadResponse), nil
+ }
+}
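+
+// An upload sketch (file contents, channel id and filename are placeholders;
+// the response is assumed to carry the resulting FileInfos as declared in
+// FileUploadResponse):
+//
+//	data, _ := ioutil.ReadFile("notes.txt")
+//	if resp, err := client.UploadPostAttachment(data, "channelid", "notes.txt"); err == nil {
+//		_ = resp.FileInfos
+//	}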
+
+func (c *Client) uploadFile(url string, data []byte, contentType string) (*Result, *AppError) {
+ rq, _ := http.NewRequest("POST", url, bytes.NewReader(data))
+ rq.Header.Set("Content-Type", contentType)
+ rq.Close = true
+
+ if len(c.AuthToken) > 0 {
+ rq.Header.Set(HEADER_AUTH, "BEARER "+c.AuthToken)
+ }
+
+ if rp, err := c.HttpClient.Do(rq); err != nil {
+ return nil, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0)
+ } else if rp.StatusCode >= 300 {
+ return nil, AppErrorFromJson(rp.Body)
+ } else {
+ defer closeBody(rp)
+ return &Result{rp.Header.Get(HEADER_REQUEST_ID),
+ rp.Header.Get(HEADER_ETAG_SERVER), FileUploadResponseFromJson(rp.Body)}, nil
+ }
+}
+
+func (c *Client) GetFile(fileId string) (io.ReadCloser, *AppError) {
+ if r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/get", "", ""); err != nil {
+ return nil, err
+ } else {
+ c.fillInExtraProperties(r)
+ return r.Body, nil
+ }
+}
+
+func (c *Client) GetFileThumbnail(fileId string) (io.ReadCloser, *AppError) {
+ if r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/get_thumbnail", "", ""); err != nil {
+ return nil, err
+ } else {
+ c.fillInExtraProperties(r)
+ return r.Body, nil
+ }
+}
+
+func (c *Client) GetFilePreview(fileId string) (io.ReadCloser, *AppError) {
+ if r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/get_preview", "", ""); err != nil {
+ return nil, err
+ } else {
+		// Note: the body is returned to the caller, so it must not be closed here;
+		// the caller is responsible for closing it.
+ c.fillInExtraProperties(r)
+ return r.Body, nil
+ }
+}
+
+func (c *Client) GetFileInfo(fileId string) (*FileInfo, *AppError) {
+ if r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/get_info", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ c.fillInExtraProperties(r)
+ return FileInfoFromJson(r.Body), nil
+ }
+}
+
+func (c *Client) GetPublicLink(fileId string) (string, *AppError) {
+ if r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/get_public_link", "", ""); err != nil {
+ return "", err
+ } else {
+ defer closeBody(r)
+ c.fillInExtraProperties(r)
+ return StringFromJson(r.Body), nil
+ }
+}
+
+func (c *Client) UpdateUser(user *User) (*Result, *AppError) {
+ if r, err := c.DoApiPost("/users/update", user.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) UpdateUserRoles(userId string, roles string) (*Result, *AppError) {
+ data := make(map[string]string)
+ data["new_roles"] = roles
+
+ if r, err := c.DoApiPost(c.GetUserRequiredRoute(userId)+"/update_roles", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) UpdateTeamRoles(userId string, roles string) (*Result, *AppError) {
+ data := make(map[string]string)
+ data["new_roles"] = roles
+ data["user_id"] = userId
+
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/update_member_roles", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) AttachDeviceId(deviceId string) (*Result, *AppError) {
+ data := make(map[string]string)
+ data["device_id"] = deviceId
+ if r, err := c.DoApiPost("/users/attach_device", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) UpdateActive(userId string, active bool) (*Result, *AppError) {
+ data := make(map[string]string)
+ data["user_id"] = userId
+ data["active"] = strconv.FormatBool(active)
+ if r, err := c.DoApiPost("/users/update_active", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) UpdateUserNotify(data map[string]string) (*Result, *AppError) {
+ if r, err := c.DoApiPost("/users/update_notify", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) UpdateUserPassword(userId, currentPassword, newPassword string) (*Result, *AppError) {
+ data := make(map[string]string)
+ data["current_password"] = currentPassword
+ data["new_password"] = newPassword
+ data["user_id"] = userId
+
+ if r, err := c.DoApiPost("/users/newpassword", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) SendPasswordReset(email string) (*Result, *AppError) {
+ data := map[string]string{}
+ data["email"] = email
+ if r, err := c.DoApiPost("/users/send_password_reset", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) ResetPassword(code, newPassword string) (*Result, *AppError) {
+ data := map[string]string{}
+ data["code"] = code
+ data["new_password"] = newPassword
+ if r, err := c.DoApiPost("/users/reset_password", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) AdminResetPassword(userId, newPassword string) (*Result, *AppError) {
+ data := map[string]string{}
+ data["user_id"] = userId
+ data["new_password"] = newPassword
+ if r, err := c.DoApiPost("/admin/reset_password", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+// GetStatuses returns a map of string statuses using user id as the key
+func (c *Client) GetStatuses() (*Result, *AppError) {
+ if r, err := c.DoApiGet("/users/status", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+// GetStatusesByIds returns a map of string statuses using user id as the key,
+// based on the provided user ids
+func (c *Client) GetStatusesByIds(userIds []string) (*Result, *AppError) {
+ if r, err := c.DoApiPost("/users/status/ids", ArrayToJson(userIds)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetMyTeam(etag string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(c.GetTeamRoute()+"/me", "", etag); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), TeamFromJson(r.Body)}, nil
+ }
+}
+
+// GetTeamMembers will return a page of team member objects, paged based on the
+// team id, offset and limit provided. Must be authenticated.
+func (c *Client) GetTeamMembers(teamId string, offset int, limit int) (*Result, *AppError) {
+ if r, err := c.DoApiGet(fmt.Sprintf("/teams/%v/members/%v/%v", teamId, offset, limit), "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), TeamMembersFromJson(r.Body)}, nil
+ }
+}
+
+// GetMyTeamMembers will return an array with team member objects that the current user
+// is a member of. Must be authenticated.
+func (c *Client) GetMyTeamMembers() (*Result, *AppError) {
+ if r, err := c.DoApiGet("/teams/members", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), TeamMembersFromJson(r.Body)}, nil
+ }
+}
+
+// GetMyTeamsUnread will return an array with TeamUnread objects that contain the amount of
+// unread messages and mentions the current user has for the teams it belongs to.
+// An optional team ID can be set to exclude that team from the results. Must be authenticated.
+func (c *Client) GetMyTeamsUnread(teamId string) (*Result, *AppError) {
+ endpoint := "/teams/unread"
+
+ if teamId != "" {
+ endpoint += fmt.Sprintf("?id=%s", url.QueryEscape(teamId))
+ }
+ if r, err := c.DoApiGet(endpoint, "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), TeamsUnreadFromJson(r.Body)}, nil
+ }
+}
+
+// GetTeamMember will return a team member object based on the team id and user id provided.
+// Must be authenticated.
+func (c *Client) GetTeamMember(teamId string, userId string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(fmt.Sprintf("/teams/%v/members/%v", teamId, userId), "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), TeamMemberFromJson(r.Body)}, nil
+ }
+}
+
+// GetTeamStats will return a team stats object containing the number of users on the team
+// based on the team id provided. Must be authenticated.
+func (c *Client) GetTeamStats(teamId string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(fmt.Sprintf("/teams/%v/stats", teamId), "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), TeamStatsFromJson(r.Body)}, nil
+ }
+}
+
+// GetTeamByName will return a team object based on the team name provided. Must be authenticated.
+func (c *Client) GetTeamByName(teamName string) (*Result, *AppError) {
+ if r, err := c.DoApiGet(fmt.Sprintf("/teams/name/%v", teamName), "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), TeamFromJson(r.Body)}, nil
+ }
+}
+
+// GetTeamMembersByIds will return team member objects as an array based on the
+// team id and a list of user ids provided. Must be authenticated.
+func (c *Client) GetTeamMembersByIds(teamId string, userIds []string) (*Result, *AppError) {
+ if r, err := c.DoApiPost(fmt.Sprintf("/teams/%v/members/ids", teamId), ArrayToJson(userIds)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), TeamMembersFromJson(r.Body)}, nil
+ }
+}
+
+// RegisterApp creates a new OAuth2 app to be used with the OAuth2 Provider. On success
+// it returns the created app. Must be authenticated as a user.
+func (c *Client) RegisterApp(app *OAuthApp) (*Result, *AppError) {
+ if r, err := c.DoApiPost("/oauth/register", app.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), OAuthAppFromJson(r.Body)}, nil
+ }
+}
+
+// AllowOAuth allows a new session by an OAuth2 App. On success it returns the URL
+// to redirect back to the app that initiated the OAuth2 flow.
+// Must be authenticated as a user.
+func (c *Client) AllowOAuth(rspType, clientId, redirect, scope, state string) (*Result, *AppError) {
+ if r, err := c.DoApiGet("/oauth/allow?response_type="+rspType+"&client_id="+clientId+"&redirect_uri="+url.QueryEscape(redirect)+"&scope="+scope+"&state="+url.QueryEscape(state), "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+// GetOAuthAppsByUser returns the OAuth2 Apps registered by the user. On success
+// it returns a list of OAuth2 Apps from the same user or all the registered apps if the user
+// is a System Administrator. Must be authenticated as a user.
+func (c *Client) GetOAuthAppsByUser() (*Result, *AppError) {
+ if r, err := c.DoApiGet("/oauth/list", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), OAuthAppListFromJson(r.Body)}, nil
+ }
+}
+
+// GetOAuthAppInfo looks up an OAuth2 App using the client_id. On success
+// it returns a sanitized OAuth2 App. Must be authenticated as a user.
+func (c *Client) GetOAuthAppInfo(clientId string) (*Result, *AppError) {
+ if r, err := c.DoApiGet("/oauth/app/"+clientId, "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), OAuthAppFromJson(r.Body)}, nil
+ }
+}
+
+// DeleteOAuthApp deletes an OAuth2 app; the app must be deleted by the same user
+// who created it or by a System Administrator. On success it returns status OK.
+// Must be authenticated as a user.
+func (c *Client) DeleteOAuthApp(id string) (*Result, *AppError) {
+ data := make(map[string]string)
+ data["id"] = id
+ if r, err := c.DoApiPost("/oauth/delete", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+// GetOAuthAuthorizedApps returns the OAuth2 Apps authorized by the user. On success
+// it returns a list of sanitized OAuth2 Apps the user has authorized.
+func (c *Client) GetOAuthAuthorizedApps() (*Result, *AppError) {
+ if r, err := c.DoApiGet("/oauth/authorized", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), OAuthAppListFromJson(r.Body)}, nil
+ }
+}
+
+// OAuthDeauthorizeApp deauthorizes an OAuth 2.0 app for the current user.
+// Returns nil on success, otherwise an AppError.
+func (c *Client) OAuthDeauthorizeApp(clientId string) *AppError {
+ if r, err := c.DoApiPost("/oauth/"+clientId+"/deauthorize", ""); err != nil {
+ return err
+ } else {
+ defer closeBody(r)
+ return nil
+ }
+}
+
+// RegenerateOAuthAppSecret generates a new OAuth App Client Secret. On success
+// it returns an OAuth2 App. Must be authenticated as a user and the same user who
+// registered the app or a System Admin.
+func (c *Client) RegenerateOAuthAppSecret(clientId string) (*Result, *AppError) {
+ if r, err := c.DoApiPost("/oauth/"+clientId+"/regen_secret", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), OAuthAppFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetAccessToken(data url.Values) (*Result, *AppError) {
+ if r, err := c.DoPost("/oauth/access_token", data.Encode(), "application/x-www-form-urlencoded"); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), AccessResponseFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) CreateIncomingWebhook(hook *IncomingWebhook) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/hooks/incoming/create", hook.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), IncomingWebhookFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) UpdateIncomingWebhook(hook *IncomingWebhook) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/hooks/incoming/update", hook.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), IncomingWebhookFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) PostToWebhook(id, payload string) (*Result, *AppError) {
+ if r, err := c.DoPost("/hooks/"+id, payload, "application/x-www-form-urlencoded"); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), nil}, nil
+ }
+}
+
+func (c *Client) DeleteIncomingWebhook(id string) (*Result, *AppError) {
+ data := make(map[string]string)
+ data["id"] = id
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/hooks/incoming/delete", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) ListIncomingWebhooks() (*Result, *AppError) {
+ if r, err := c.DoApiGet(c.GetTeamRoute()+"/hooks/incoming/list", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), IncomingWebhookListFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetAllPreferences() (*Result, *AppError) {
+ if r, err := c.DoApiGet("/preferences/", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ preferences, _ := PreferencesFromJson(r.Body)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID), r.Header.Get(HEADER_ETAG_SERVER), preferences}, nil
+ }
+}
+
+func (c *Client) SetPreferences(preferences *Preferences) (*Result, *AppError) {
+ if r, err := c.DoApiPost("/preferences/save", preferences.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), preferences}, nil
+ }
+}
+
+func (c *Client) GetPreference(category string, name string) (*Result, *AppError) {
+ if r, err := c.DoApiGet("/preferences/"+category+"/"+name, "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID), r.Header.Get(HEADER_ETAG_SERVER), PreferenceFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetPreferenceCategory(category string) (*Result, *AppError) {
+ if r, err := c.DoApiGet("/preferences/"+category, "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ preferences, _ := PreferencesFromJson(r.Body)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID), r.Header.Get(HEADER_ETAG_SERVER), preferences}, nil
+ }
+}
+
+// DeletePreferences deletes a list of preferences owned by the current user.
+// If successful, it returns true. Otherwise, an error will be returned.
+func (c *Client) DeletePreferences(preferences *Preferences) (bool, *AppError) {
+ if r, err := c.DoApiPost("/preferences/delete", preferences.ToJson()); err != nil {
+ return false, err
+ } else {
+ return c.CheckStatusOK(r), nil
+ }
+}
+
+func (c *Client) CreateOutgoingWebhook(hook *OutgoingWebhook) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/hooks/outgoing/create", hook.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), OutgoingWebhookFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) UpdateOutgoingWebhook(hook *OutgoingWebhook) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/hooks/outgoing/update", hook.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), OutgoingWebhookFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) DeleteOutgoingWebhook(id string) (*Result, *AppError) {
+ data := make(map[string]string)
+ data["id"] = id
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/hooks/outgoing/delete", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) ListOutgoingWebhooks() (*Result, *AppError) {
+ if r, err := c.DoApiGet(c.GetTeamRoute()+"/hooks/outgoing/list", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), OutgoingWebhookListFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) RegenOutgoingWebhookToken(id string) (*Result, *AppError) {
+ data := make(map[string]string)
+ data["id"] = id
+ if r, err := c.DoApiPost(c.GetTeamRoute()+"/hooks/outgoing/regen_token", MapToJson(data)); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), OutgoingWebhookFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) MockSession(sessionToken string) {
+ c.AuthToken = sessionToken
+ c.AuthType = HEADER_BEARER
+}
+
+func (c *Client) GetClientLicenceConfig(etag string) (*Result, *AppError) {
+ if r, err := c.DoApiGet("/license/client_config", "", etag); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) GetInitialLoad() (*Result, *AppError) {
+ if r, err := c.DoApiGet("/users/initial_load", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), InitialLoadFromJson(r.Body)}, nil
+ }
+}
+
+// ListEmoji returns a list of all user-created emoji for the server.
+func (c *Client) ListEmoji() ([]*Emoji, *AppError) {
+ if r, err := c.DoApiGet(c.GetEmojiRoute()+"/list", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ c.fillInExtraProperties(r)
+ return EmojiListFromJson(r.Body), nil
+ }
+}
+
+// CreateEmoji will save an emoji to the server if the current user has permission
+// to do so. If successful, the provided emoji will be returned with its Id field
+// filled in. Otherwise, an error will be returned.
+func (c *Client) CreateEmoji(emoji *Emoji, image []byte, filename string) (*Emoji, *AppError) {
+ c.clearExtraProperties()
+
+ body := &bytes.Buffer{}
+ writer := multipart.NewWriter(body)
+
+ if part, err := writer.CreateFormFile("image", filename); err != nil {
+ return nil, NewAppError("CreateEmoji", "model.client.create_emoji.image.app_error", nil, err.Error(), 0)
+ } else if _, err = io.Copy(part, bytes.NewBuffer(image)); err != nil {
+ return nil, NewAppError("CreateEmoji", "model.client.create_emoji.image.app_error", nil, err.Error(), 0)
+ }
+
+ if err := writer.WriteField("emoji", emoji.ToJson()); err != nil {
+ return nil, NewAppError("CreateEmoji", "model.client.create_emoji.emoji.app_error", nil, err.Error(), 0)
+ }
+
+ if err := writer.Close(); err != nil {
+ return nil, NewAppError("CreateEmoji", "model.client.create_emoji.writer.app_error", nil, err.Error(), 0)
+ }
+
+ rq, _ := http.NewRequest("POST", c.ApiUrl+c.GetEmojiRoute()+"/create", body)
+ rq.Header.Set("Content-Type", writer.FormDataContentType())
+ rq.Close = true
+
+ if len(c.AuthToken) > 0 {
+ rq.Header.Set(HEADER_AUTH, "BEARER "+c.AuthToken)
+ }
+
+ if r, err := c.HttpClient.Do(rq); err != nil {
+ return nil, NewAppError("CreateEmoji", "model.client.connecting.app_error", nil, err.Error(), 0)
+ } else if r.StatusCode >= 300 {
+ return nil, AppErrorFromJson(r.Body)
+ } else {
+ defer closeBody(r)
+ c.fillInExtraProperties(r)
+ return EmojiFromJson(r.Body), nil
+ }
+}
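Illustrative only: a short sketch of calling CreateEmoji, continuing from an authenticated v3 client as in the earlier sketch. The CreatorId and Name fields are assumed from the Emoji struct in emoji.go of this package; the image path and ids are placeholders.

	// Register a custom emoji from a local PNG (placeholder values).
	image, ioErr := ioutil.ReadFile("parrot.png") // hypothetical image file
	if ioErr != nil {
		log.Fatal(ioErr)
	}

	emoji := &model.Emoji{
		CreatorId: "current-user-id", // placeholder; field assumed from emoji.go
		Name:      "parrot",
	}

	if created, appErr := client.CreateEmoji(emoji, image, "parrot.png"); appErr != nil {
		log.Fatal(appErr.Error())
	} else {
		// Per the comment above, the returned emoji has its Id field filled in.
		log.Printf("created emoji with id %s", created.Id)
	}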
+
+// DeleteEmoji will delete an emoji from the server if the current user has permission
+// to do so. If successful, it returns true. Otherwise, an error will be returned.
+func (c *Client) DeleteEmoji(id string) (bool, *AppError) {
+ data := map[string]string{"id": id}
+
+ if r, err := c.DoApiPost(c.GetEmojiRoute()+"/delete", MapToJson(data)); err != nil {
+ return false, err
+ } else {
+ defer closeBody(r)
+ c.fillInExtraProperties(r)
+ return c.CheckStatusOK(r), nil
+ }
+}
+
+// GetCustomEmojiImageUrl returns the API route that can be used to get the image used by
+// the given emoji.
+func (c *Client) GetCustomEmojiImageUrl(id string) string {
+ return c.GetEmojiRoute() + "/" + id
+}
+
+// UploadCertificateFile uploads an x509 base64 certificate or private key file to be used with SAML.
+// The data byte array is required and must be a multipart body with 'certificate' as the field name;
+// contentType is also required. Returns nil if successful, otherwise returns an AppError.
+func (c *Client) UploadCertificateFile(data []byte, contentType string) *AppError {
+ url := c.ApiUrl + "/admin/add_certificate"
+ rq, _ := http.NewRequest("POST", url, bytes.NewReader(data))
+ rq.Header.Set("Content-Type", contentType)
+ rq.Close = true
+
+ if len(c.AuthToken) > 0 {
+ rq.Header.Set(HEADER_AUTH, "BEARER "+c.AuthToken)
+ }
+
+ if rp, err := c.HttpClient.Do(rq); err != nil {
+ return NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0)
+ } else if rp.StatusCode >= 300 {
+ return AppErrorFromJson(rp.Body)
+ } else {
+ defer closeBody(rp)
+ c.fillInExtraProperties(rp)
+ return nil
+ }
+}
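Because the comment above requires a multipart body with 'certificate' as the field name, a minimal sketch of building that body is shown below. It continues from an authenticated v3 client and assumes the bytes, io, ioutil, mime/multipart and log imports; the certificate path is a placeholder and error handling is abbreviated.

	// Build a multipart body with the file under the "certificate" field,
	// as required by UploadCertificateFile above.
	certData, ioErr := ioutil.ReadFile("saml-public.crt") // hypothetical certificate file
	if ioErr != nil {
		log.Fatal(ioErr)
	}

	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	part, _ := writer.CreateFormFile("certificate", "saml-public.crt") // errors elided for brevity
	io.Copy(part, bytes.NewReader(certData))
	writer.Close()

	if appErr := client.UploadCertificateFile(body.Bytes(), writer.FormDataContentType()); appErr != nil {
		log.Fatal(appErr.Error())
	}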
+
+// RemoveCertificateFile removes an x509 base64 certificate or private key file used with SAML.
+// filename is required. Returns nil if successful, otherwise returns an AppError.
+func (c *Client) RemoveCertificateFile(filename string) *AppError {
+ if r, err := c.DoApiPost("/admin/remove_certificate", MapToJson(map[string]string{"filename": filename})); err != nil {
+ return err
+ } else {
+ defer closeBody(r)
+ c.fillInExtraProperties(r)
+ return nil
+ }
+}
+
+// SamlCertificateStatus checks whether the x509 base64 certificates and private key files used with SAML
+// exist on the file system. Returns a map[string]interface{} if successful, otherwise returns an AppError.
+// Must be authenticated as a System Admin.
+func (c *Client) SamlCertificateStatus(filename string) (map[string]interface{}, *AppError) {
+ if r, err := c.DoApiGet("/admin/remove_certificate", "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ c.fillInExtraProperties(r)
+ return StringInterfaceFromJson(r.Body), nil
+ }
+}
+
+// GetWebrtcToken, if successful, returns a map with a valid token, STUN server and TURN server with
+// credentials to use with the Mattermost WebRTC service; otherwise it returns an AppError.
+// Must be an authenticated user.
+func (c *Client) GetWebrtcToken() (map[string]string, *AppError) {
+ if r, err := c.DoApiPost("/webrtc/token", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return MapFromJson(r.Body), nil
+ }
+}
+
+// GetFileInfosForPost returns a list of FileInfo objects for a given post id, if successful.
+// Otherwise, it returns an error.
+func (c *Client) GetFileInfosForPost(channelId string, postId string, etag string) ([]*FileInfo, *AppError) {
+ c.clearExtraProperties()
+
+ if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+fmt.Sprintf("/posts/%v/get_file_infos", postId), "", etag); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ c.fillInExtraProperties(r)
+ return FileInfosFromJson(r.Body), nil
+ }
+}
+
+// Saves an emoji reaction for a post in the given channel. Returns the saved reaction if successful, otherwise returns an AppError.
+func (c *Client) SaveReaction(channelId string, reaction *Reaction) (*Reaction, *AppError) {
+ if r, err := c.DoApiPost(c.GetChannelRoute(channelId)+fmt.Sprintf("/posts/%v/reactions/save", reaction.PostId), reaction.ToJson()); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ c.fillInExtraProperties(r)
+ return ReactionFromJson(r.Body), nil
+ }
+}
+
+// Removes an emoji reaction for a post in the given channel. Returns nil if successful, otherwise returns an AppError.
+func (c *Client) DeleteReaction(channelId string, reaction *Reaction) *AppError {
+ if r, err := c.DoApiPost(c.GetChannelRoute(channelId)+fmt.Sprintf("/posts/%v/reactions/delete", reaction.PostId), reaction.ToJson()); err != nil {
+ return err
+ } else {
+ defer closeBody(r)
+ c.fillInExtraProperties(r)
+ return nil
+ }
+}
+
+// Lists all emoji reactions made for the given post in the given channel. Returns a list of Reactions if successful, otherwise returns an AppError.
+func (c *Client) ListReactions(channelId string, postId string) ([]*Reaction, *AppError) {
+ if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+fmt.Sprintf("/posts/%v/reactions", postId), "", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ c.fillInExtraProperties(r)
+ return ReactionsFromJson(r.Body), nil
+ }
+}
+
+// Updates the user's roles in the channel by replacing them with the roles provided.
+func (c *Client) UpdateChannelRoles(channelId string, userId string, roles string) (map[string]string, *ResponseMetadata) {
+ data := make(map[string]string)
+ data["new_roles"] = roles
+ data["user_id"] = userId
+
+ if r, err := c.DoApiPost(c.GetChannelRoute(channelId)+"/update_member_roles", MapToJson(data)); err != nil {
+ metadata := ResponseMetadata{Error: err}
+ if r != nil {
+ metadata.StatusCode = r.StatusCode
+ }
+ return nil, &metadata
+ } else {
+ defer closeBody(r)
+ return MapFromJson(r.Body),
+ &ResponseMetadata{
+ StatusCode: r.StatusCode,
+ RequestId: r.Header.Get(HEADER_REQUEST_ID),
+ Etag: r.Header.Get(HEADER_ETAG_SERVER),
+ }
+ }
+}
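Unlike most v3 client methods above, UpdateChannelRoles returns a *ResponseMetadata rather than (*Result, *AppError), so callers check metadata.Error. A short sketch, continuing from an authenticated client, with placeholder ids and an illustrative space-separated role string:

	// Promote a member to channel admin; ids and role string are placeholders.
	roles, metadata := client.UpdateChannelRoles("channel-id", "user-id", "channel_user channel_admin")
	if metadata.Error != nil {
		log.Fatal(metadata.Error.Error())
	}
	log.Printf("status %d, new roles: %v", metadata.StatusCode, roles)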
+
+func (c *Client) PinPost(channelId string, postId string) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetChannelRoute(channelId)+"/posts/"+postId+"/pin", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), PostFromJson(r.Body)}, nil
+ }
+}
+
+func (c *Client) UnpinPost(channelId string, postId string) (*Result, *AppError) {
+ if r, err := c.DoApiPost(c.GetChannelRoute(channelId)+"/posts/"+postId+"/unpin", ""); err != nil {
+ return nil, err
+ } else {
+ defer closeBody(r)
+ return &Result{r.Header.Get(HEADER_REQUEST_ID),
+ r.Header.Get(HEADER_ETAG_SERVER), PostFromJson(r.Body)}, nil
+ }
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/client4.go b/vendor/github.com/mattermost/mattermost-server/model/client4.go
new file mode 100644
index 00000000..962b816b
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/client4.go
@@ -0,0 +1,3299 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+type Response struct {
+ StatusCode int
+ Error *AppError
+ RequestId string
+ Etag string
+ ServerVersion string
+ Header http.Header
+}
+
+type Client4 struct {
+ Url string // The location of the server, for example "http://localhost:8065"
+ ApiUrl string // The api location of the server, for example "http://localhost:8065/api/v4"
+ HttpClient *http.Client // The http client
+ AuthToken string
+ AuthType string
+}
+
+func NewAPIv4Client(url string) *Client4 {
+ return &Client4{url, url + API_URL_SUFFIX, &http.Client{}, "", ""}
+}
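For orientation, a minimal sketch of constructing the v4 client and logging in with the Login method defined later in this file; the server URL and credentials are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	// Placeholder server URL; NewAPIv4Client appends API_URL_SUFFIX itself.
	client := model.NewAPIv4Client("http://localhost:8065")

	user, resp := client.Login("user@example.com", "password") // placeholder credentials
	if resp.Error != nil {
		log.Fatal(resp.Error.Error())
	}

	// The token from the login response is stored on the client and sent with
	// subsequent requests by DoApiRequest.
	fmt.Printf("logged in as %s (request id %s)\n", user.Username, resp.RequestId)
}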
+
+func BuildErrorResponse(r *http.Response, err *AppError) *Response {
+ var statusCode int
+ var header http.Header
+ if r != nil {
+ statusCode = r.StatusCode
+ header = r.Header
+ } else {
+ statusCode = 0
+ header = make(http.Header)
+ }
+
+ return &Response{
+ StatusCode: statusCode,
+ Error: err,
+ Header: header,
+ }
+}
+
+func BuildResponse(r *http.Response) *Response {
+ return &Response{
+ StatusCode: r.StatusCode,
+ RequestId: r.Header.Get(HEADER_REQUEST_ID),
+ Etag: r.Header.Get(HEADER_ETAG_SERVER),
+ ServerVersion: r.Header.Get(HEADER_VERSION_ID),
+ Header: r.Header,
+ }
+}
+
+func (c *Client4) SetOAuthToken(token string) {
+ c.AuthToken = token
+ c.AuthType = HEADER_TOKEN
+}
+
+func (c *Client4) ClearOAuthToken() {
+ c.AuthToken = ""
+ c.AuthType = HEADER_BEARER
+}
+
+func (c *Client4) GetUsersRoute() string {
+ return fmt.Sprintf("/users")
+}
+
+func (c *Client4) GetUserRoute(userId string) string {
+ return fmt.Sprintf(c.GetUsersRoute()+"/%v", userId)
+}
+
+func (c *Client4) GetUserAccessTokensRoute() string {
+ return fmt.Sprintf(c.GetUsersRoute() + "/tokens")
+}
+
+func (c *Client4) GetUserAccessTokenRoute(tokenId string) string {
+ return fmt.Sprintf(c.GetUsersRoute()+"/tokens/%v", tokenId)
+}
+
+func (c *Client4) GetUserByUsernameRoute(userName string) string {
+ return fmt.Sprintf(c.GetUsersRoute()+"/username/%v", userName)
+}
+
+func (c *Client4) GetUserByEmailRoute(email string) string {
+ return fmt.Sprintf(c.GetUsersRoute()+"/email/%v", email)
+}
+
+func (c *Client4) GetTeamsRoute() string {
+ return fmt.Sprintf("/teams")
+}
+
+func (c *Client4) GetTeamRoute(teamId string) string {
+ return fmt.Sprintf(c.GetTeamsRoute()+"/%v", teamId)
+}
+
+func (c *Client4) GetTeamAutoCompleteCommandsRoute(teamId string) string {
+ return fmt.Sprintf(c.GetTeamsRoute()+"/%v/commands/autocomplete", teamId)
+}
+
+func (c *Client4) GetTeamByNameRoute(teamName string) string {
+ return fmt.Sprintf(c.GetTeamsRoute()+"/name/%v", teamName)
+}
+
+func (c *Client4) GetTeamMemberRoute(teamId, userId string) string {
+ return fmt.Sprintf(c.GetTeamRoute(teamId)+"/members/%v", userId)
+}
+
+func (c *Client4) GetTeamMembersRoute(teamId string) string {
+ return fmt.Sprintf(c.GetTeamRoute(teamId) + "/members")
+}
+
+func (c *Client4) GetTeamStatsRoute(teamId string) string {
+ return fmt.Sprintf(c.GetTeamRoute(teamId) + "/stats")
+}
+
+func (c *Client4) GetTeamImportRoute(teamId string) string {
+ return fmt.Sprintf(c.GetTeamRoute(teamId) + "/import")
+}
+
+func (c *Client4) GetChannelsRoute() string {
+ return fmt.Sprintf("/channels")
+}
+
+func (c *Client4) GetChannelsForTeamRoute(teamId string) string {
+ return fmt.Sprintf(c.GetTeamRoute(teamId) + "/channels")
+}
+
+func (c *Client4) GetChannelRoute(channelId string) string {
+ return fmt.Sprintf(c.GetChannelsRoute()+"/%v", channelId)
+}
+
+func (c *Client4) GetChannelByNameRoute(channelName, teamId string) string {
+ return fmt.Sprintf(c.GetTeamRoute(teamId)+"/channels/name/%v", channelName)
+}
+
+func (c *Client4) GetChannelByNameForTeamNameRoute(channelName, teamName string) string {
+ return fmt.Sprintf(c.GetTeamByNameRoute(teamName)+"/channels/name/%v", channelName)
+}
+
+func (c *Client4) GetChannelMembersRoute(channelId string) string {
+ return fmt.Sprintf(c.GetChannelRoute(channelId) + "/members")
+}
+
+func (c *Client4) GetChannelMemberRoute(channelId, userId string) string {
+ return fmt.Sprintf(c.GetChannelMembersRoute(channelId)+"/%v", userId)
+}
+
+func (c *Client4) GetPostsRoute() string {
+ return fmt.Sprintf("/posts")
+}
+
+func (c *Client4) GetConfigRoute() string {
+ return fmt.Sprintf("/config")
+}
+
+func (c *Client4) GetLicenseRoute() string {
+ return fmt.Sprintf("/license")
+}
+
+func (c *Client4) GetPostRoute(postId string) string {
+ return fmt.Sprintf(c.GetPostsRoute()+"/%v", postId)
+}
+
+func (c *Client4) GetFilesRoute() string {
+ return fmt.Sprintf("/files")
+}
+
+func (c *Client4) GetFileRoute(fileId string) string {
+ return fmt.Sprintf(c.GetFilesRoute()+"/%v", fileId)
+}
+
+func (c *Client4) GetPluginsRoute() string {
+ return fmt.Sprintf("/plugins")
+}
+
+func (c *Client4) GetPluginRoute(pluginId string) string {
+ return fmt.Sprintf(c.GetPluginsRoute()+"/%v", pluginId)
+}
+
+func (c *Client4) GetSystemRoute() string {
+ return fmt.Sprintf("/system")
+}
+
+func (c *Client4) GetTestEmailRoute() string {
+ return fmt.Sprintf("/email/test")
+}
+
+func (c *Client4) GetDatabaseRoute() string {
+ return fmt.Sprintf("/database")
+}
+
+func (c *Client4) GetCacheRoute() string {
+ return fmt.Sprintf("/caches")
+}
+
+func (c *Client4) GetClusterRoute() string {
+ return fmt.Sprintf("/cluster")
+}
+
+func (c *Client4) GetIncomingWebhooksRoute() string {
+ return fmt.Sprintf("/hooks/incoming")
+}
+
+func (c *Client4) GetIncomingWebhookRoute(hookID string) string {
+ return fmt.Sprintf(c.GetIncomingWebhooksRoute()+"/%v", hookID)
+}
+
+func (c *Client4) GetComplianceReportsRoute() string {
+ return fmt.Sprintf("/compliance/reports")
+}
+
+func (c *Client4) GetComplianceReportRoute(reportId string) string {
+ return fmt.Sprintf("/compliance/reports/%v", reportId)
+}
+
+func (c *Client4) GetOutgoingWebhooksRoute() string {
+ return fmt.Sprintf("/hooks/outgoing")
+}
+
+func (c *Client4) GetOutgoingWebhookRoute(hookID string) string {
+ return fmt.Sprintf(c.GetOutgoingWebhooksRoute()+"/%v", hookID)
+}
+
+func (c *Client4) GetPreferencesRoute(userId string) string {
+ return fmt.Sprintf(c.GetUserRoute(userId) + "/preferences")
+}
+
+func (c *Client4) GetUserStatusRoute(userId string) string {
+ return fmt.Sprintf(c.GetUserRoute(userId) + "/status")
+}
+
+func (c *Client4) GetUserStatusesRoute() string {
+ return fmt.Sprintf(c.GetUsersRoute() + "/status")
+}
+
+func (c *Client4) GetSamlRoute() string {
+ return fmt.Sprintf("/saml")
+}
+
+func (c *Client4) GetLdapRoute() string {
+ return fmt.Sprintf("/ldap")
+}
+
+func (c *Client4) GetBrandRoute() string {
+ return fmt.Sprintf("/brand")
+}
+
+func (c *Client4) GetDataRetentionRoute() string {
+ return fmt.Sprintf("/data_retention")
+}
+
+func (c *Client4) GetElasticsearchRoute() string {
+ return fmt.Sprintf("/elasticsearch")
+}
+
+func (c *Client4) GetCommandsRoute() string {
+ return fmt.Sprintf("/commands")
+}
+
+func (c *Client4) GetCommandRoute(commandId string) string {
+ return fmt.Sprintf(c.GetCommandsRoute()+"/%v", commandId)
+}
+
+func (c *Client4) GetEmojisRoute() string {
+ return fmt.Sprintf("/emoji")
+}
+
+func (c *Client4) GetEmojiRoute(emojiId string) string {
+ return fmt.Sprintf(c.GetEmojisRoute()+"/%v", emojiId)
+}
+
+func (c *Client4) GetEmojiByNameRoute(name string) string {
+ return fmt.Sprintf(c.GetEmojisRoute()+"/name/%v", name)
+}
+
+func (c *Client4) GetReactionsRoute() string {
+ return fmt.Sprintf("/reactions")
+}
+
+func (c *Client4) GetOAuthAppsRoute() string {
+ return fmt.Sprintf("/oauth/apps")
+}
+
+func (c *Client4) GetOAuthAppRoute(appId string) string {
+ return fmt.Sprintf("/oauth/apps/%v", appId)
+}
+
+func (c *Client4) GetOpenGraphRoute() string {
+ return fmt.Sprintf("/opengraph")
+}
+
+func (c *Client4) GetJobsRoute() string {
+ return fmt.Sprintf("/jobs")
+}
+
+func (c *Client4) GetAnalyticsRoute() string {
+ return fmt.Sprintf("/analytics")
+}
+
+func (c *Client4) DoApiGet(url string, etag string) (*http.Response, *AppError) {
+ return c.DoApiRequest(http.MethodGet, c.ApiUrl+url, "", etag)
+}
+
+func (c *Client4) DoApiPost(url string, data string) (*http.Response, *AppError) {
+ return c.DoApiRequest(http.MethodPost, c.ApiUrl+url, data, "")
+}
+
+func (c *Client4) DoApiPut(url string, data string) (*http.Response, *AppError) {
+ return c.DoApiRequest(http.MethodPut, c.ApiUrl+url, data, "")
+}
+
+func (c *Client4) DoApiDelete(url string) (*http.Response, *AppError) {
+ return c.DoApiRequest(http.MethodDelete, c.ApiUrl+url, "", "")
+}
+
+func (c *Client4) DoApiRequest(method, url, data, etag string) (*http.Response, *AppError) {
+ rq, _ := http.NewRequest(method, url, strings.NewReader(data))
+ rq.Close = true
+
+ if len(etag) > 0 {
+ rq.Header.Set(HEADER_ETAG_CLIENT, etag)
+ }
+
+ if len(c.AuthToken) > 0 {
+ rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken)
+ }
+
+ if rp, err := c.HttpClient.Do(rq); err != nil || rp == nil {
+ return nil, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0)
+ } else if rp.StatusCode == 304 {
+ return rp, nil
+ } else if rp.StatusCode >= 300 {
+ defer closeBody(rp)
+ return rp, AppErrorFromJson(rp.Body)
+ } else {
+ return rp, nil
+ }
+}
+
+func (c *Client4) DoUploadFile(url string, data []byte, contentType string) (*FileUploadResponse, *Response) {
+ rq, _ := http.NewRequest("POST", c.ApiUrl+url, bytes.NewReader(data))
+ rq.Header.Set("Content-Type", contentType)
+ rq.Close = true
+
+ if len(c.AuthToken) > 0 {
+ rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken)
+ }
+
+ if rp, err := c.HttpClient.Do(rq); err != nil || rp == nil {
+ return nil, BuildErrorResponse(rp, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0))
+ } else {
+ defer closeBody(rp)
+
+ if rp.StatusCode >= 300 {
+ return nil, BuildErrorResponse(rp, AppErrorFromJson(rp.Body))
+ } else {
+ return FileUploadResponseFromJson(rp.Body), BuildResponse(rp)
+ }
+ }
+}
+
+func (c *Client4) DoEmojiUploadFile(url string, data []byte, contentType string) (*Emoji, *Response) {
+ rq, _ := http.NewRequest("POST", c.ApiUrl+url, bytes.NewReader(data))
+ rq.Header.Set("Content-Type", contentType)
+ rq.Close = true
+
+ if len(c.AuthToken) > 0 {
+ rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken)
+ }
+
+ if rp, err := c.HttpClient.Do(rq); err != nil || rp == nil {
+ return nil, BuildErrorResponse(rp, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0))
+ } else {
+ defer closeBody(rp)
+
+ if rp.StatusCode >= 300 {
+ return nil, BuildErrorResponse(rp, AppErrorFromJson(rp.Body))
+ } else {
+ return EmojiFromJson(rp.Body), BuildResponse(rp)
+ }
+ }
+}
+
+func (c *Client4) DoUploadImportTeam(url string, data []byte, contentType string) (map[string]string, *Response) {
+ rq, _ := http.NewRequest("POST", c.ApiUrl+url, bytes.NewReader(data))
+ rq.Header.Set("Content-Type", contentType)
+ rq.Close = true
+
+ if len(c.AuthToken) > 0 {
+ rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken)
+ }
+
+ if rp, err := c.HttpClient.Do(rq); err != nil || rp == nil {
+ return nil, BuildErrorResponse(rp, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0))
+ } else {
+ defer closeBody(rp)
+
+ if rp.StatusCode >= 300 {
+ return nil, BuildErrorResponse(rp, AppErrorFromJson(rp.Body))
+ } else {
+ return MapFromJson(rp.Body), BuildResponse(rp)
+ }
+ }
+}
+
+// CheckStatusOK is a convenience function for checking the standard OK response
+// from the web service.
+func CheckStatusOK(r *http.Response) bool {
+ m := MapFromJson(r.Body)
+ defer closeBody(r)
+
+ if m != nil && m[STATUS] == STATUS_OK {
+ return true
+ }
+
+ return false
+}
+
+// Authentication Section
+
+// LoginById authenticates a user by user id and password.
+func (c *Client4) LoginById(id string, password string) (*User, *Response) {
+ m := make(map[string]string)
+ m["id"] = id
+ m["password"] = password
+ return c.login(m)
+}
+
+// Login authenticates a user by login id, which can be username, email or some sort
+// of SSO identifier based on server configuration, and a password.
+func (c *Client4) Login(loginId string, password string) (*User, *Response) {
+ m := make(map[string]string)
+ m["login_id"] = loginId
+ m["password"] = password
+ return c.login(m)
+}
+
+// LoginByLdap authenticates a user by LDAP id and password.
+func (c *Client4) LoginByLdap(loginId string, password string) (*User, *Response) {
+ m := make(map[string]string)
+ m["login_id"] = loginId
+ m["password"] = password
+ m["ldap_only"] = "true"
+ return c.login(m)
+}
+
+// LoginWithDevice authenticates a user by login id (username, email or some sort
+// of SSO identifier based on configuration), password and attaches a device id to
+// the session.
+func (c *Client4) LoginWithDevice(loginId string, password string, deviceId string) (*User, *Response) {
+ m := make(map[string]string)
+ m["login_id"] = loginId
+ m["password"] = password
+ m["device_id"] = deviceId
+ return c.login(m)
+}
+
+func (c *Client4) login(m map[string]string) (*User, *Response) {
+ if r, err := c.DoApiPost("/users/login", MapToJson(m)); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ c.AuthToken = r.Header.Get(HEADER_TOKEN)
+ c.AuthType = HEADER_BEARER
+ defer closeBody(r)
+ return UserFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// Logout terminates the current user's session.
+func (c *Client4) Logout() (bool, *Response) {
+ if r, err := c.DoApiPost("/users/logout", ""); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ c.AuthToken = ""
+ c.AuthType = HEADER_BEARER
+
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// SwitchAccountType changes a user's login type from one type to another.
+func (c *Client4) SwitchAccountType(switchRequest *SwitchRequest) (string, *Response) {
+ if r, err := c.DoApiPost(c.GetUsersRoute()+"/login/switch", switchRequest.ToJson()); err != nil {
+ return "", BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return MapFromJson(r.Body)["follow_link"], BuildResponse(r)
+ }
+}
+
+// User Section
+
+// CreateUser creates a user in the system based on the provided user struct.
+func (c *Client4) CreateUser(user *User) (*User, *Response) {
+ if r, err := c.DoApiPost(c.GetUsersRoute(), user.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// CreateUserWithHash creates a user in the system based on the provided user struct and the given hash and data strings.
+func (c *Client4) CreateUserWithHash(user *User, hash, data string) (*User, *Response) {
+ var query string
+ if hash != "" && data != "" {
+ query = fmt.Sprintf("?d=%v&h=%v", url.QueryEscape(data), hash)
+ } else {
+ err := NewAppError("MissingHashOrData", "api.user.create_user.missing_hash_or_data.app_error", nil, "", http.StatusBadRequest)
+ return nil, &Response{StatusCode: err.StatusCode, Error: err}
+ }
+ if r, err := c.DoApiPost(c.GetUsersRoute()+query, user.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// CreateUserWithInviteId creates a user in the system based on the provided invite id.
+func (c *Client4) CreateUserWithInviteId(user *User, inviteId string) (*User, *Response) {
+ var query string
+ if inviteId != "" {
+ query = fmt.Sprintf("?iid=%v", url.QueryEscape(inviteId))
+ } else {
+ err := NewAppError("MissingInviteId", "api.user.create_user.missing_invite_id.app_error", nil, "", http.StatusBadRequest)
+ return nil, &Response{StatusCode: err.StatusCode, Error: err}
+ }
+ if r, err := c.DoApiPost(c.GetUsersRoute()+query, user.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetMe returns the logged in user.
+func (c *Client4) GetMe(etag string) (*User, *Response) {
+ if r, err := c.DoApiGet(c.GetUserRoute(ME), etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetUser returns a user based on the provided user id string.
+func (c *Client4) GetUser(userId, etag string) (*User, *Response) {
+ if r, err := c.DoApiGet(c.GetUserRoute(userId), etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserFromJson(r.Body), BuildResponse(r)
+ }
+}
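Since DoApiRequest passes the etag through and returns 304 responses unchanged, GetUser can be used for conditional fetches. A sketch continuing from the authenticated Client4 above; the user id is a placeholder, and on a 304 the parsed user is typically nil because the body is empty, so the cached copy should be kept.

	// First fetch: no etag, full user returned.
	user, resp := client.GetUser("user-id", "")
	if resp.Error != nil {
		log.Fatal(resp.Error.Error())
	}

	// Second fetch: reuse the etag; on 304 fall back to the cached user.
	user2, resp2 := client.GetUser("user-id", resp.Etag)
	if resp2.StatusCode == 304 {
		user2 = user
	}
	_ = user2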
+
+// GetUserByUsername returns a user based on the provided user name string.
+func (c *Client4) GetUserByUsername(userName, etag string) (*User, *Response) {
+ if r, err := c.DoApiGet(c.GetUserByUsernameRoute(userName), etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetUserByEmail returns a user based on the provided user email string.
+func (c *Client4) GetUserByEmail(email, etag string) (*User, *Response) {
+ if r, err := c.DoApiGet(c.GetUserByEmailRoute(email), etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// AutocompleteUsersInTeam returns the users on a team based on a search term.
+func (c *Client4) AutocompleteUsersInTeam(teamId string, username string, etag string) (*UserAutocomplete, *Response) {
+ query := fmt.Sprintf("?in_team=%v&name=%v", teamId, username)
+ if r, err := c.DoApiGet(c.GetUsersRoute()+"/autocomplete"+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserAutocompleteFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// AutocompleteUsersInChannel returns the users in a channel based on a search term.
+func (c *Client4) AutocompleteUsersInChannel(teamId string, channelId string, username string, etag string) (*UserAutocomplete, *Response) {
+ query := fmt.Sprintf("?in_team=%v&in_channel=%v&name=%v", teamId, channelId, username)
+ if r, err := c.DoApiGet(c.GetUsersRoute()+"/autocomplete"+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserAutocompleteFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// AutocompleteUsers returns the users in the system based on a search term.
+func (c *Client4) AutocompleteUsers(username string, etag string) (*UserAutocomplete, *Response) {
+ query := fmt.Sprintf("?name=%v", username)
+ if r, err := c.DoApiGet(c.GetUsersRoute()+"/autocomplete"+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserAutocompleteFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetProfileImage gets a user's profile image. Must be logged in or be a system administrator.
+func (c *Client4) GetProfileImage(userId, etag string) ([]byte, *Response) {
+ if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/image", etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+
+ if data, err := ioutil.ReadAll(r.Body); err != nil {
+ return nil, BuildErrorResponse(r, NewAppError("GetProfileImage", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode))
+ } else {
+ return data, BuildResponse(r)
+ }
+ }
+}
+
+// GetUsers returns a page of users on the system. Page counting starts at 0.
+func (c *Client4) GetUsers(page int, perPage int, etag string) ([]*User, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+ if r, err := c.DoApiGet(c.GetUsersRoute()+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserListFromJson(r.Body), BuildResponse(r)
+ }
+}
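Because the page/per_page query above starts counting at page 0, walking the full user list is a simple loop. A sketch continuing from the authenticated Client4 above; the page size is arbitrary.

	// Collect all users by paging until a short (or empty) page is returned.
	perPage := 60
	var all []*model.User
	for page := 0; ; page++ {
		users, resp := client.GetUsers(page, perPage, "")
		if resp.Error != nil {
			log.Fatal(resp.Error.Error())
		}
		all = append(all, users...)
		if len(users) < perPage {
			break
		}
	}
	log.Printf("fetched %d users", len(all))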
+
+// GetUsersInTeam returns a page of users on a team. Page counting starts at 0.
+func (c *Client4) GetUsersInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response) {
+ query := fmt.Sprintf("?in_team=%v&page=%v&per_page=%v", teamId, page, perPage)
+ if r, err := c.DoApiGet(c.GetUsersRoute()+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetNewUsersInTeam returns a page of users on a team, sorted by creation time. Page counting starts at 0.
+func (c *Client4) GetNewUsersInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response) {
+ query := fmt.Sprintf("?sort=create_at&in_team=%v&page=%v&per_page=%v", teamId, page, perPage)
+ if r, err := c.DoApiGet(c.GetUsersRoute()+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetRecentlyActiveUsersInTeam returns a page of users on a team, sorted by last activity. Page counting starts at 0.
+func (c *Client4) GetRecentlyActiveUsersInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response) {
+ query := fmt.Sprintf("?sort=last_activity_at&in_team=%v&page=%v&per_page=%v", teamId, page, perPage)
+ if r, err := c.DoApiGet(c.GetUsersRoute()+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetUsersNotInTeam returns a page of users who are not in a team. Page counting starts at 0.
+func (c *Client4) GetUsersNotInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response) {
+ query := fmt.Sprintf("?not_in_team=%v&page=%v&per_page=%v", teamId, page, perPage)
+ if r, err := c.DoApiGet(c.GetUsersRoute()+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetUsersInChannel returns a page of users in a channel. Page counting starts at 0.
+func (c *Client4) GetUsersInChannel(channelId string, page int, perPage int, etag string) ([]*User, *Response) {
+ query := fmt.Sprintf("?in_channel=%v&page=%v&per_page=%v", channelId, page, perPage)
+ if r, err := c.DoApiGet(c.GetUsersRoute()+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetUsersNotInChannel returns a page of users on a team who are not in the given channel. Page counting starts at 0.
+func (c *Client4) GetUsersNotInChannel(teamId, channelId string, page int, perPage int, etag string) ([]*User, *Response) {
+ query := fmt.Sprintf("?in_team=%v&not_in_channel=%v&page=%v&per_page=%v", teamId, channelId, page, perPage)
+ if r, err := c.DoApiGet(c.GetUsersRoute()+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetUsersWithoutTeam returns a page of users on the system that aren't on any teams. Page counting starts at 0.
+func (c *Client4) GetUsersWithoutTeam(page int, perPage int, etag string) ([]*User, *Response) {
+ query := fmt.Sprintf("?without_team=1&page=%v&per_page=%v", page, perPage)
+ if r, err := c.DoApiGet(c.GetUsersRoute()+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetUsersByIds returns a list of users based on the provided user ids.
+func (c *Client4) GetUsersByIds(userIds []string) ([]*User, *Response) {
+ if r, err := c.DoApiPost(c.GetUsersRoute()+"/ids", ArrayToJson(userIds)); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetUsersByUsernames returns a list of users based on the provided usernames.
+func (c *Client4) GetUsersByUsernames(usernames []string) ([]*User, *Response) {
+ if r, err := c.DoApiPost(c.GetUsersRoute()+"/usernames", ArrayToJson(usernames)); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// SearchUsers returns a list of users based on some search criteria.
+func (c *Client4) SearchUsers(search *UserSearch) ([]*User, *Response) {
+ if r, err := c.DoApiPost(c.GetUsersRoute()+"/search", search.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// UpdateUser updates a user in the system based on the provided user struct.
+func (c *Client4) UpdateUser(user *User) (*User, *Response) {
+ if r, err := c.DoApiPut(c.GetUserRoute(user.Id), user.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// PatchUser partially updates a user in the system. Any missing fields are not updated.
+func (c *Client4) PatchUser(userId string, patch *UserPatch) (*User, *Response) {
+ if r, err := c.DoApiPut(c.GetUserRoute(userId)+"/patch", patch.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// UpdateUserAuth updates a user's auth data (authData, authService and password) in the system.
+func (c *Client4) UpdateUserAuth(userId string, userAuth *UserAuth) (*UserAuth, *Response) {
+ if r, err := c.DoApiPut(c.GetUserRoute(userId)+"/auth", userAuth.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserAuthFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// UpdateUserMfa activates multi-factor authentication for a user if activate
+// is true and a valid code is provided. If activate is false, then code is not
+// required and multi-factor authentication is disabled for the user.
+func (c *Client4) UpdateUserMfa(userId, code string, activate bool) (bool, *Response) {
+ requestBody := make(map[string]interface{})
+ requestBody["activate"] = activate
+ requestBody["code"] = code
+
+ if r, err := c.DoApiPut(c.GetUserRoute(userId)+"/mfa", StringInterfaceToJson(requestBody)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// CheckUserMfa checks whether a user has MFA active on their account or not based on the
+// provided login id.
+func (c *Client4) CheckUserMfa(loginId string) (bool, *Response) {
+ requestBody := make(map[string]interface{})
+ requestBody["login_id"] = loginId
+
+ if r, err := c.DoApiPost(c.GetUsersRoute()+"/mfa", StringInterfaceToJson(requestBody)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ data := StringInterfaceFromJson(r.Body)
+ if mfaRequired, ok := data["mfa_required"].(bool); !ok {
+ return false, BuildResponse(r)
+ } else {
+ return mfaRequired, BuildResponse(r)
+ }
+ }
+}
+
+// GenerateMfaSecret will generate a new MFA secret for a user and return it as a string and
+// as a base64 encoded image QR code.
+func (c *Client4) GenerateMfaSecret(userId string) (*MfaSecret, *Response) {
+ if r, err := c.DoApiPost(c.GetUserRoute(userId)+"/mfa/generate", ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return MfaSecretFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// UpdateUserPassword updates a user's password. Must be logged in as the user or be a system administrator.
+func (c *Client4) UpdateUserPassword(userId, currentPassword, newPassword string) (bool, *Response) {
+ requestBody := map[string]string{"current_password": currentPassword, "new_password": newPassword}
+ if r, err := c.DoApiPut(c.GetUserRoute(userId)+"/password", MapToJson(requestBody)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// UpdateUserRoles updates a user's roles in the system. A user can have "system_user" and "system_admin" roles.
+func (c *Client4) UpdateUserRoles(userId, roles string) (bool, *Response) {
+ requestBody := map[string]string{"roles": roles}
+ if r, err := c.DoApiPut(c.GetUserRoute(userId)+"/roles", MapToJson(requestBody)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// UpdateUserActive updates a user's active status, activating or deactivating the user.
+func (c *Client4) UpdateUserActive(userId string, active bool) (bool, *Response) {
+ requestBody := make(map[string]interface{})
+ requestBody["active"] = active
+
+ if r, err := c.DoApiPut(c.GetUserRoute(userId)+"/active", StringInterfaceToJson(requestBody)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// DeleteUser deactivates a user in the system based on the provided user id string.
+func (c *Client4) DeleteUser(userId string) (bool, *Response) {
+ if r, err := c.DoApiDelete(c.GetUserRoute(userId)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// SendPasswordResetEmail will send a link for password resetting to a user with the
+// provided email.
+func (c *Client4) SendPasswordResetEmail(email string) (bool, *Response) {
+ requestBody := map[string]string{"email": email}
+ if r, err := c.DoApiPost(c.GetUsersRoute()+"/password/reset/send", MapToJson(requestBody)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// ResetPassword uses a recovery token to reset a user's password.
+func (c *Client4) ResetPassword(token, newPassword string) (bool, *Response) {
+ requestBody := map[string]string{"token": token, "new_password": newPassword}
+ if r, err := c.DoApiPost(c.GetUsersRoute()+"/password/reset", MapToJson(requestBody)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
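+
+// Usage sketch (illustrative only, not part of the upstream source): the
+// two-step reset flow. The token value here is an assumption; in practice it is
+// delivered to the user inside the password reset email sent by the first call.
+//
+//   sent, resp := client.SendPasswordResetEmail("user@example.com")
+//   if resp.Error != nil || !sent {
+//       // handle the error
+//   }
+//   // ... the user follows the emailed link and hands the token back ...
+//   ok, resp := client.ResetPassword(token, "a-new-password")
+//   _ = ok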
+
+// GetSessions returns a list of sessions based on the provided user id string.
+func (c *Client4) GetSessions(userId, etag string) ([]*Session, *Response) {
+ if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/sessions", etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return SessionsFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// RevokeSession revokes a user session based on the provided user id and session id strings.
+func (c *Client4) RevokeSession(userId, sessionId string) (bool, *Response) {
+ requestBody := map[string]string{"session_id": sessionId}
+ if r, err := c.DoApiPost(c.GetUserRoute(userId)+"/sessions/revoke", MapToJson(requestBody)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// RevokeAllSessions revokes all sessions for the provided user id string.
+func (c *Client4) RevokeAllSessions(userId string) (bool, *Response) {
+ if r, err := c.DoApiPost(c.GetUserRoute(userId)+"/sessions/revoke/all", ""); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
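+
+// Usage sketch (illustrative only, not part of the upstream source): list a
+// user's sessions and revoke each one individually. It assumes Session exposes
+// an Id field, as defined elsewhere in this package.
+//
+//   sessions, resp := client.GetSessions(userId, "")
+//   if resp.Error != nil {
+//       // handle the error
+//   }
+//   for _, session := range sessions {
+//       client.RevokeSession(userId, session.Id)
+//   }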
+
+// AttachDeviceId attaches a mobile device ID to the current session.
+func (c *Client4) AttachDeviceId(deviceId string) (bool, *Response) {
+ requestBody := map[string]string{"device_id": deviceId}
+ if r, err := c.DoApiPut(c.GetUsersRoute()+"/sessions/device", MapToJson(requestBody)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// GetTeamsUnreadForUser will return an array with TeamUnread objects that contain the amount
+// of unread messages and mentions the current user has for the teams it belongs to.
+// An optional team ID can be set to exclude that team from the results. Must be authenticated.
+func (c *Client4) GetTeamsUnreadForUser(userId, teamIdToExclude string) ([]*TeamUnread, *Response) {
+ optional := ""
+ if teamIdToExclude != "" {
+ optional += fmt.Sprintf("?exclude_team=%s", url.QueryEscape(teamIdToExclude))
+ }
+
+ if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/teams/unread"+optional, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return TeamsUnreadFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetUserAudits returns a list of audits based on the provided user id string.
+func (c *Client4) GetUserAudits(userId string, page int, perPage int, etag string) (Audits, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+ if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/audits"+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return AuditsFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// VerifyUserEmail will verify a user's email using the supplied token.
+func (c *Client4) VerifyUserEmail(token string) (bool, *Response) {
+ requestBody := map[string]string{"token": token}
+ if r, err := c.DoApiPost(c.GetUsersRoute()+"/email/verify", MapToJson(requestBody)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// SendVerificationEmail will send an email to the user with the provided email address, if
+// that user exists. The email will contain a link that can be used to verify the user's
+// email address.
+func (c *Client4) SendVerificationEmail(email string) (bool, *Response) {
+ requestBody := map[string]string{"email": email}
+ if r, err := c.DoApiPost(c.GetUsersRoute()+"/email/verify/send", MapToJson(requestBody)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// SetProfileImage sets the profile image for the user.
+func (c *Client4) SetProfileImage(userId string, data []byte) (bool, *Response) {
+ body := &bytes.Buffer{}
+ writer := multipart.NewWriter(body)
+
+ if part, err := writer.CreateFormFile("image", "profile.png"); err != nil {
+ return false, &Response{Error: NewAppError("SetProfileImage", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)}
+ } else if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil {
+ return false, &Response{Error: NewAppError("SetProfileImage", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)}
+ }
+
+ if err := writer.Close(); err != nil {
+ return false, &Response{Error: NewAppError("SetProfileImage", "model.client.set_profile_user.writer.app_error", nil, err.Error(), http.StatusBadRequest)}
+ }
+
+ rq, _ := http.NewRequest("POST", c.ApiUrl+c.GetUserRoute(userId)+"/image", bytes.NewReader(body.Bytes()))
+ rq.Header.Set("Content-Type", writer.FormDataContentType())
+ rq.Close = true
+
+ if len(c.AuthToken) > 0 {
+ rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken)
+ }
+
+ if rp, err := c.HttpClient.Do(rq); err != nil || rp == nil {
+ return false, &Response{StatusCode: http.StatusForbidden, Error: NewAppError(c.GetUserRoute(userId)+"/image", "model.client.connecting.app_error", nil, err.Error(), http.StatusForbidden)}
+ } else {
+ defer closeBody(rp)
+
+ if rp.StatusCode >= 300 {
+ return false, BuildErrorResponse(rp, AppErrorFromJson(rp.Body))
+ } else {
+ return CheckStatusOK(rp), BuildResponse(rp)
+ }
+ }
+}
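+
+// Usage sketch (illustrative only, not part of the upstream source): read an
+// image from disk and upload it as the user's profile image. The file path is an
+// assumption for demonstration.
+//
+//   data, err := ioutil.ReadFile("profile.png")
+//   if err != nil {
+//       // handle the read error
+//   }
+//   ok, resp := client.SetProfileImage(userId, data)
+//   _, _ = ok, resp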
+
+// CreateUserAccessToken will generate a user access token that can be used in place
+// of a session token to access the REST API. Must have the 'create_user_access_token'
+// permission and if generating for another user, must have the 'edit_other_users'
+// permission. A non-blank description is required.
+func (c *Client4) CreateUserAccessToken(userId, description string) (*UserAccessToken, *Response) {
+ requestBody := map[string]string{"description": description}
+ if r, err := c.DoApiPost(c.GetUserRoute(userId)+"/tokens", MapToJson(requestBody)); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserAccessTokenFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetUserAccessTokens will get a page of user access tokens, returning the id,
+// description, is_active and user_id of each. The actual token values are not
+// returned. Must have the 'manage_system' permission.
+func (c *Client4) GetUserAccessTokens(page int, perPage int) ([]*UserAccessToken, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+ if r, err := c.DoApiGet(c.GetUserAccessTokensRoute()+query, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserAccessTokenListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetUserAccessToken will get a user access token's id, description, is_active
+// and the user_id of the user it is for. The actual token will not be returned.
+// Must have the 'read_user_access_token' permission and if getting for another
+// user, must have the 'edit_other_users' permission.
+func (c *Client4) GetUserAccessToken(tokenId string) (*UserAccessToken, *Response) {
+ if r, err := c.DoApiGet(c.GetUserAccessTokenRoute(tokenId), ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserAccessTokenFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetUserAccessTokensForUser will get a paged list of user access tokens showing id,
+// description and user_id for each. The actual tokens will not be returned. Must have
+// the 'read_user_access_token' permission and if getting for another user, must have the
+// 'edit_other_users' permission.
+func (c *Client4) GetUserAccessTokensForUser(userId string, page, perPage int) ([]*UserAccessToken, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+ if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/tokens"+query, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserAccessTokenListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// RevokeUserAccessToken will revoke a user access token by id. Must have the
+// 'revoke_user_access_token' permission and if revoking for another user, must have the
+// 'edit_other_users' permission.
+func (c *Client4) RevokeUserAccessToken(tokenId string) (bool, *Response) {
+ requestBody := map[string]string{"token_id": tokenId}
+ if r, err := c.DoApiPost(c.GetUsersRoute()+"/tokens/revoke", MapToJson(requestBody)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// SearchUserAccessTokens returns user access tokens matching the provided search term.
+func (c *Client4) SearchUserAccessTokens(search *UserAccessTokenSearch) ([]*UserAccessToken, *Response) {
+ if r, err := c.DoApiPost(c.GetUsersRoute()+"/tokens/search", search.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return UserAccessTokenListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// DisableUserAccessToken will disable a user access token by id. Must have the
+// 'revoke_user_access_token' permission and if disabling for another user, must have the
+// 'edit_other_users' permission.
+func (c *Client4) DisableUserAccessToken(tokenId string) (bool, *Response) {
+ requestBody := map[string]string{"token_id": tokenId}
+ if r, err := c.DoApiPost(c.GetUsersRoute()+"/tokens/disable", MapToJson(requestBody)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// EnableUserAccessToken will enable a user access token by id. Must have the
+// 'create_user_access_token' permission and if enabling for another user, must have the
+// 'edit_other_users' permission.
+func (c *Client4) EnableUserAccessToken(tokenId string) (bool, *Response) {
+ requestBody := map[string]string{"token_id": tokenId}
+ if r, err := c.DoApiPost(c.GetUsersRoute()+"/tokens/enable", MapToJson(requestBody)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
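+
+// Usage sketch (illustrative only, not part of the upstream source): create a
+// personal access token and use it to authenticate a second client. It assumes
+// UserAccessToken exposes Token and Id fields and that NewAPIv4Client and the
+// client's Url field are available, as defined earlier in this file; the "BEARER"
+// auth type is likewise an assumption.
+//
+//   token, resp := client.CreateUserAccessToken(userId, "integration token")
+//   if resp.Error != nil {
+//       // handle the error
+//   }
+//   apiClient := NewAPIv4Client(client.Url)
+//   apiClient.AuthType = "BEARER"
+//   apiClient.AuthToken = token.Token
+//   // later, revoke the token when it is no longer needed:
+//   client.RevokeUserAccessToken(token.Id)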
+
+// Team Section
+
+// CreateTeam creates a team in the system based on the provided team struct.
+func (c *Client4) CreateTeam(team *Team) (*Team, *Response) {
+ if r, err := c.DoApiPost(c.GetTeamsRoute(), team.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return TeamFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetTeam returns a team based on the provided team id string.
+func (c *Client4) GetTeam(teamId, etag string) (*Team, *Response) {
+ if r, err := c.DoApiGet(c.GetTeamRoute(teamId), etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return TeamFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetAllTeams returns all teams based on permissions.
+func (c *Client4) GetAllTeams(etag string, page int, perPage int) ([]*Team, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+ if r, err := c.DoApiGet(c.GetTeamsRoute()+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return TeamListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetTeamByName returns a team based on the provided team name string.
+func (c *Client4) GetTeamByName(name, etag string) (*Team, *Response) {
+ if r, err := c.DoApiGet(c.GetTeamByNameRoute(name), etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return TeamFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// SearchTeams returns teams matching the provided search term.
+func (c *Client4) SearchTeams(search *TeamSearch) ([]*Team, *Response) {
+ if r, err := c.DoApiPost(c.GetTeamsRoute()+"/search", search.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return TeamListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// TeamExists returns true if a team with the provided name exists, false otherwise.
+func (c *Client4) TeamExists(name, etag string) (bool, *Response) {
+ if r, err := c.DoApiGet(c.GetTeamByNameRoute(name)+"/exists", etag); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return MapBoolFromJson(r.Body)["exists"], BuildResponse(r)
+ }
+}
+
+// GetTeamsForUser returns a list of teams a user is on. Must be logged in as the user
+// or be a system administrator.
+func (c *Client4) GetTeamsForUser(userId, etag string) ([]*Team, *Response) {
+ if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/teams", etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return TeamListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetTeamMember returns a team member based on the provided team and user id strings.
+func (c *Client4) GetTeamMember(teamId, userId, etag string) (*TeamMember, *Response) {
+ if r, err := c.DoApiGet(c.GetTeamMemberRoute(teamId, userId), etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return TeamMemberFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// UpdateTeamMemberRoles will update the roles on a team for a user.
+func (c *Client4) UpdateTeamMemberRoles(teamId, userId, newRoles string) (bool, *Response) {
+ requestBody := map[string]string{"roles": newRoles}
+ if r, err := c.DoApiPut(c.GetTeamMemberRoute(teamId, userId)+"/roles", MapToJson(requestBody)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// UpdateTeam will update a team.
+func (c *Client4) UpdateTeam(team *Team) (*Team, *Response) {
+ if r, err := c.DoApiPut(c.GetTeamRoute(team.Id), team.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return TeamFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// PatchTeam partially updates a team. Any missing fields are not updated.
+func (c *Client4) PatchTeam(teamId string, patch *TeamPatch) (*Team, *Response) {
+ if r, err := c.DoApiPut(c.GetTeamRoute(teamId)+"/patch", patch.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return TeamFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// SoftDeleteTeam deletes the team softly (archive only, not permanent delete).
+func (c *Client4) SoftDeleteTeam(teamId string) (bool, *Response) {
+ if r, err := c.DoApiDelete(c.GetTeamRoute(teamId)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// PermanentDeleteTeam permanently deletes a team. This should only be used when
+// needed for compliance and similar purposes.
+func (c *Client4) PermanentDeleteTeam(teamId string) (bool, *Response) {
+ if r, err := c.DoApiDelete(c.GetTeamRoute(teamId) + "?permanent=true"); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// GetTeamMembers returns team members based on the provided team id string.
+func (c *Client4) GetTeamMembers(teamId string, page int, perPage int, etag string) ([]*TeamMember, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+ if r, err := c.DoApiGet(c.GetTeamMembersRoute(teamId)+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return TeamMembersFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetTeamMembersForUser returns the team members for a user.
+func (c *Client4) GetTeamMembersForUser(userId string, etag string) ([]*TeamMember, *Response) {
+ if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/teams/members", etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return TeamMembersFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetTeamMembersByIds will return an array of team members based on the
+// team id and a list of user ids provided. Must be authenticated.
+func (c *Client4) GetTeamMembersByIds(teamId string, userIds []string) ([]*TeamMember, *Response) {
+ if r, err := c.DoApiPost(fmt.Sprintf("/teams/%v/members/ids", teamId), ArrayToJson(userIds)); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return TeamMembersFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// AddTeamMember adds a user to a team and returns the resulting team member.
+func (c *Client4) AddTeamMember(teamId, userId string) (*TeamMember, *Response) {
+ member := &TeamMember{TeamId: teamId, UserId: userId}
+
+ if r, err := c.DoApiPost(c.GetTeamMembersRoute(teamId), member.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return TeamMemberFromJson(r.Body), BuildResponse(r)
+ }
+}
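+
+// Usage sketch (illustrative only, not part of the upstream source): add a user
+// to a team and read the membership back.
+//
+//   member, resp := client.AddTeamMember(teamId, userId)
+//   if resp.Error != nil {
+//       // handle the error
+//   }
+//   member, resp = client.GetTeamMember(teamId, userId, "")
+//   _ = member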
+
+// AddTeamMemberFromInvite adds a user to a team and returns the resulting team
+// member, using either an invite id or an invite hash/data pair.
+func (c *Client4) AddTeamMemberFromInvite(hash, dataToHash, inviteId string) (*TeamMember, *Response) {
+ var query string
+
+ if inviteId != "" {
+ query += fmt.Sprintf("?invite_id=%v", inviteId)
+ }
+
+ if hash != "" && dataToHash != "" {
+ query += fmt.Sprintf("?hash=%v&data=%v", hash, dataToHash)
+ }
+
+ if r, err := c.DoApiPost(c.GetTeamsRoute()+"/members/invite"+query, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return TeamMemberFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// AddTeamMembers adds a number of users to a team and returns the team members.
+func (c *Client4) AddTeamMembers(teamId string, userIds []string) ([]*TeamMember, *Response) {
+ var members []*TeamMember
+ for _, userId := range userIds {
+ member := &TeamMember{TeamId: teamId, UserId: userId}
+ members = append(members, member)
+ }
+
+ if r, err := c.DoApiPost(c.GetTeamMembersRoute(teamId)+"/batch", TeamMembersToJson(members)); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return TeamMembersFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// RemoveTeamMember will remove a user from a team.
+func (c *Client4) RemoveTeamMember(teamId, userId string) (bool, *Response) {
+ if r, err := c.DoApiDelete(c.GetTeamMemberRoute(teamId, userId)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// GetTeamStats returns the team stats based on the team id string.
+// Must be authenticated.
+func (c *Client4) GetTeamStats(teamId, etag string) (*TeamStats, *Response) {
+ if r, err := c.DoApiGet(c.GetTeamStatsRoute(teamId), etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return TeamStatsFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetTeamUnread will return a TeamUnread object that contains the amount of
+// unread messages and mentions the user has for the specified team.
+// Must be authenticated.
+func (c *Client4) GetTeamUnread(teamId, userId string) (*TeamUnread, *Response) {
+ if r, err := c.DoApiGet(c.GetUserRoute(userId)+c.GetTeamRoute(teamId)+"/unread", ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return TeamUnreadFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// ImportTeam will import a team exported from another app into an existing team.
+func (c *Client4) ImportTeam(data []byte, filesize int, importFrom, filename, teamId string) (map[string]string, *Response) {
+ body := &bytes.Buffer{}
+ writer := multipart.NewWriter(body)
+
+ if part, err := writer.CreateFormFile("file", filename); err != nil {
+ return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.file.app_error", nil, err.Error(), http.StatusBadRequest)}
+ } else if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil {
+ return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.file.app_error", nil, err.Error(), http.StatusBadRequest)}
+ }
+
+ if part, err := writer.CreateFormField("filesize"); err != nil {
+ return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.file_size.app_error", nil, err.Error(), http.StatusBadRequest)}
+ } else if _, err = io.Copy(part, strings.NewReader(strconv.Itoa(filesize))); err != nil {
+ return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.file_size.app_error", nil, err.Error(), http.StatusBadRequest)}
+ }
+
+ if part, err := writer.CreateFormField("importFrom"); err != nil {
+ return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.import_from.app_error", nil, err.Error(), http.StatusBadRequest)}
+ } else if _, err = io.Copy(part, strings.NewReader(importFrom)); err != nil {
+ return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.import_from.app_error", nil, err.Error(), http.StatusBadRequest)}
+ }
+
+ if err := writer.Close(); err != nil {
+ return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.writer.app_error", nil, err.Error(), http.StatusBadRequest)}
+ }
+
+ return c.DoUploadImportTeam(c.GetTeamImportRoute(teamId), body.Bytes(), writer.FormDataContentType())
+}
+
+// InviteUsersToTeam invites users to the team by email.
+func (c *Client4) InviteUsersToTeam(teamId string, userEmails []string) (bool, *Response) {
+ if r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/invite/email", ArrayToJson(userEmails)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// GetTeamInviteInfo returns a team object from an invite id containing sanitized information.
+func (c *Client4) GetTeamInviteInfo(inviteId string) (*Team, *Response) {
+ if r, err := c.DoApiGet(c.GetTeamsRoute()+"/invite/"+inviteId, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return TeamFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// Channel Section
+
+// CreateChannel creates a channel based on the provided channel struct.
+func (c *Client4) CreateChannel(channel *Channel) (*Channel, *Response) {
+ if r, err := c.DoApiPost(c.GetChannelsRoute(), channel.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// UpdateChannel updates a channel based on the provided channel struct.
+func (c *Client4) UpdateChannel(channel *Channel) (*Channel, *Response) {
+ if r, err := c.DoApiPut(c.GetChannelRoute(channel.Id), channel.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// PatchChannel partially updates a channel. Any missing fields are not updated.
+func (c *Client4) PatchChannel(channelId string, patch *ChannelPatch) (*Channel, *Response) {
+ if r, err := c.DoApiPut(c.GetChannelRoute(channelId)+"/patch", patch.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// RestoreChannel restores a previously deleted channel.
+func (c *Client4) RestoreChannel(channelId string) (*Channel, *Response) {
+ if r, err := c.DoApiPost(c.GetChannelRoute(channelId)+"/restore", ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// CreateDirectChannel creates a direct message channel based on the two user
+// ids provided.
+func (c *Client4) CreateDirectChannel(userId1, userId2 string) (*Channel, *Response) {
+ requestBody := []string{userId1, userId2}
+ if r, err := c.DoApiPost(c.GetChannelsRoute()+"/direct", ArrayToJson(requestBody)); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// CreateGroupChannel creates a group message channel based on the provided user ids.
+func (c *Client4) CreateGroupChannel(userIds []string) (*Channel, *Response) {
+ if r, err := c.DoApiPost(c.GetChannelsRoute()+"/group", ArrayToJson(userIds)); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelFromJson(r.Body), BuildResponse(r)
+ }
+}
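+
+// Usage sketch (illustrative only, not part of the upstream source): open a
+// direct message channel with one other user, or a group message channel for
+// three or more participants.
+//
+//   dm, resp := client.CreateDirectChannel(myUserId, otherUserId)
+//   if resp.Error != nil {
+//       // handle the error
+//   }
+//   group, resp := client.CreateGroupChannel([]string{myUserId, otherUserId, thirdUserId})
+//   _, _ = dm, group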
+
+// GetChannel returns a channel based on the provided channel id string.
+func (c *Client4) GetChannel(channelId, etag string) (*Channel, *Response) {
+ if r, err := c.DoApiGet(c.GetChannelRoute(channelId), etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetChannelStats returns statistics for a channel.
+func (c *Client4) GetChannelStats(channelId string, etag string) (*ChannelStats, *Response) {
+ if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/stats", etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelStatsFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetPinnedPosts gets a list of pinned posts.
+func (c *Client4) GetPinnedPosts(channelId string, etag string) (*PostList, *Response) {
+ if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/pinned", etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return PostListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetPublicChannelsForTeam returns a list of public channels based on the provided team id string.
+func (c *Client4) GetPublicChannelsForTeam(teamId string, page int, perPage int, etag string) ([]*Channel, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+ if r, err := c.DoApiGet(c.GetChannelsForTeamRoute(teamId)+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelSliceFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetDeletedChannelsForTeam returns a list of deleted channels based on the provided team id string.
+func (c *Client4) GetDeletedChannelsForTeam(teamId string, page int, perPage int, etag string) ([]*Channel, *Response) {
+ query := fmt.Sprintf("/deleted?page=%v&per_page=%v", page, perPage)
+ if r, err := c.DoApiGet(c.GetChannelsForTeamRoute(teamId)+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelSliceFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetPublicChannelsByIdsForTeam returns a list of public channels based on the provided team id and channel id strings.
+func (c *Client4) GetPublicChannelsByIdsForTeam(teamId string, channelIds []string) ([]*Channel, *Response) {
+ if r, err := c.DoApiPost(c.GetChannelsForTeamRoute(teamId)+"/ids", ArrayToJson(channelIds)); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelSliceFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetChannelsForTeamForUser returns a list of channels on a team for a user.
+func (c *Client4) GetChannelsForTeamForUser(teamId, userId, etag string) ([]*Channel, *Response) {
+ if r, err := c.DoApiGet(c.GetUserRoute(userId)+c.GetTeamRoute(teamId)+"/channels", etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelSliceFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// SearchChannels returns the channels on a team matching the provided search term.
+func (c *Client4) SearchChannels(teamId string, search *ChannelSearch) ([]*Channel, *Response) {
+ if r, err := c.DoApiPost(c.GetChannelsForTeamRoute(teamId)+"/search", search.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelSliceFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// DeleteChannel deletes a channel based on the provided channel id string.
+func (c *Client4) DeleteChannel(channelId string) (bool, *Response) {
+ if r, err := c.DoApiDelete(c.GetChannelRoute(channelId)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// GetChannelByName returns a channel based on the provided channel name and team id strings.
+func (c *Client4) GetChannelByName(channelName, teamId string, etag string) (*Channel, *Response) {
+ if r, err := c.DoApiGet(c.GetChannelByNameRoute(channelName, teamId), etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetChannelByNameForTeamName returns a channel based on the provided channel name and team name strings.
+func (c *Client4) GetChannelByNameForTeamName(channelName, teamName string, etag string) (*Channel, *Response) {
+ if r, err := c.DoApiGet(c.GetChannelByNameForTeamNameRoute(channelName, teamName), etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetChannelMembers gets a page of channel members.
+func (c *Client4) GetChannelMembers(channelId string, page, perPage int, etag string) (*ChannelMembers, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+ if r, err := c.DoApiGet(c.GetChannelMembersRoute(channelId)+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelMembersFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetChannelMembersByIds gets the channel members in a channel for a list of user ids.
+func (c *Client4) GetChannelMembersByIds(channelId string, userIds []string) (*ChannelMembers, *Response) {
+ if r, err := c.DoApiPost(c.GetChannelMembersRoute(channelId)+"/ids", ArrayToJson(userIds)); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelMembersFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetChannelMember gets a channel member.
+func (c *Client4) GetChannelMember(channelId, userId, etag string) (*ChannelMember, *Response) {
+ if r, err := c.DoApiGet(c.GetChannelMemberRoute(channelId, userId), etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelMemberFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetChannelMembersForUser gets all the channel members for a user on a team.
+func (c *Client4) GetChannelMembersForUser(userId, teamId, etag string) (*ChannelMembers, *Response) {
+ if r, err := c.DoApiGet(fmt.Sprintf(c.GetUserRoute(userId)+"/teams/%v/channels/members", teamId), etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelMembersFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// ViewChannel performs a view action for a user. Synonymous with switching channels or marking channels as read by a user.
+func (c *Client4) ViewChannel(userId string, view *ChannelView) (*ChannelViewResponse, *Response) {
+ url := fmt.Sprintf(c.GetChannelsRoute()+"/members/%v/view", userId)
+ if r, err := c.DoApiPost(url, view.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelViewResponseFromJson(r.Body), BuildResponse(r)
+ }
+}
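+
+// Usage sketch (illustrative only, not part of the upstream source): mark a
+// channel as viewed when the user switches into it. It assumes ChannelView
+// exposes ChannelId and PrevChannelId fields, as defined elsewhere in this package.
+//
+//   view := &ChannelView{ChannelId: newChannelId, PrevChannelId: oldChannelId}
+//   if _, resp := client.ViewChannel(userId, view); resp.Error != nil {
+//       // handle the error
+//   }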
+
+// GetChannelUnread will return a ChannelUnread object that contains the number of
+// unread messages and mentions for a user.
+func (c *Client4) GetChannelUnread(channelId, userId string) (*ChannelUnread, *Response) {
+ if r, err := c.DoApiGet(c.GetUserRoute(userId)+c.GetChannelRoute(channelId)+"/unread", ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelUnreadFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// UpdateChannelRoles will update the roles on a channel for a user.
+func (c *Client4) UpdateChannelRoles(channelId, userId, roles string) (bool, *Response) {
+ requestBody := map[string]string{"roles": roles}
+ if r, err := c.DoApiPut(c.GetChannelMemberRoute(channelId, userId)+"/roles", MapToJson(requestBody)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// UpdateChannelNotifyProps will update the notification properties on a channel for a user.
+func (c *Client4) UpdateChannelNotifyProps(channelId, userId string, props map[string]string) (bool, *Response) {
+ if r, err := c.DoApiPut(c.GetChannelMemberRoute(channelId, userId)+"/notify_props", MapToJson(props)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// AddChannelMember adds a user to a channel and returns the resulting channel member.
+func (c *Client4) AddChannelMember(channelId, userId string) (*ChannelMember, *Response) {
+ requestBody := map[string]string{"user_id": userId}
+ if r, err := c.DoApiPost(c.GetChannelMembersRoute(channelId), MapToJson(requestBody)); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelMemberFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// AddChannelMemberWithRootId adds a user to a channel and returns the resulting channel member. The "added to channel" post is created as a reply to the post with id postRootId.
+func (c *Client4) AddChannelMemberWithRootId(channelId, userId, postRootId string) (*ChannelMember, *Response) {
+ requestBody := map[string]string{"user_id": userId, "post_root_id": postRootId}
+ if r, err := c.DoApiPost(c.GetChannelMembersRoute(channelId), MapToJson(requestBody)); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ChannelMemberFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// RemoveUserFromChannel will delete the channel member object for a user, effectively removing the user from a channel.
+func (c *Client4) RemoveUserFromChannel(channelId, userId string) (bool, *Response) {
+ if r, err := c.DoApiDelete(c.GetChannelMemberRoute(channelId, userId)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// Post Section
+
+// CreatePost creates a post based on the provided post struct.
+func (c *Client4) CreatePost(post *Post) (*Post, *Response) {
+ if r, err := c.DoApiPost(c.GetPostsRoute(), post.ToUnsanitizedJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return PostFromJson(r.Body), BuildResponse(r)
+ }
+}
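+
+// Usage sketch (illustrative only, not part of the upstream source): create a
+// post and reply to it in a thread. It assumes Post exposes ChannelId, Message
+// and RootId fields, as defined elsewhere in this package.
+//
+//   post, resp := client.CreatePost(&Post{ChannelId: channelId, Message: "Hello"})
+//   if resp.Error != nil {
+//       // handle the error
+//   }
+//   reply := &Post{ChannelId: channelId, RootId: post.Id, Message: "Hello back"}
+//   reply, resp = client.CreatePost(reply)
+//   _ = reply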
+
+// UpdatePost updates a post based on the provided post struct.
+func (c *Client4) UpdatePost(postId string, post *Post) (*Post, *Response) {
+ if r, err := c.DoApiPut(c.GetPostRoute(postId), post.ToUnsanitizedJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return PostFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// PatchPost partially updates a post. Any missing fields are not updated.
+func (c *Client4) PatchPost(postId string, patch *PostPatch) (*Post, *Response) {
+ if r, err := c.DoApiPut(c.GetPostRoute(postId)+"/patch", patch.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return PostFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// PinPost pins a post based on the provided post id string.
+func (c *Client4) PinPost(postId string) (bool, *Response) {
+ if r, err := c.DoApiPost(c.GetPostRoute(postId)+"/pin", ""); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// UnpinPost unpins a post based on the provided post id string.
+func (c *Client4) UnpinPost(postId string) (bool, *Response) {
+ if r, err := c.DoApiPost(c.GetPostRoute(postId)+"/unpin", ""); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// GetPost gets a single post.
+func (c *Client4) GetPost(postId string, etag string) (*Post, *Response) {
+ if r, err := c.DoApiGet(c.GetPostRoute(postId), etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return PostFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// DeletePost deletes a post based on the provided post id string.
+func (c *Client4) DeletePost(postId string) (bool, *Response) {
+ if r, err := c.DoApiDelete(c.GetPostRoute(postId)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// GetPostThread gets a post with all the other posts in the same thread.
+func (c *Client4) GetPostThread(postId string, etag string) (*PostList, *Response) {
+ if r, err := c.DoApiGet(c.GetPostRoute(postId)+"/thread", etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return PostListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetPostsForChannel gets a page of posts with an array for ordering for a channel.
+func (c *Client4) GetPostsForChannel(channelId string, page, perPage int, etag string) (*PostList, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+ if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/posts"+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return PostListFromJson(r.Body), BuildResponse(r)
+ }
+}
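+
+// Usage sketch (illustrative only, not part of the upstream source): page
+// through a channel's posts, 60 at a time, until a page comes back empty. It
+// assumes PostList exposes Order and Posts fields, as defined elsewhere in this
+// package.
+//
+//   for page := 0; ; page++ {
+//       posts, resp := client.GetPostsForChannel(channelId, page, 60, "")
+//       if resp.Error != nil || len(posts.Order) == 0 {
+//           break
+//       }
+//       // process posts.Posts, keyed by the ids in posts.Order
+//   }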
+
+// GetFlaggedPostsForUser returns flagged posts of a user based on user id string.
+func (c *Client4) GetFlaggedPostsForUser(userId string, page int, perPage int) (*PostList, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+ if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/posts/flagged"+query, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return PostListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetFlaggedPostsForUserInTeam returns flagged posts in team of a user based on user id string.
+func (c *Client4) GetFlaggedPostsForUserInTeam(userId string, teamId string, page int, perPage int) (*PostList, *Response) {
+ if len(teamId) != 26 {
+ return nil, &Response{StatusCode: http.StatusBadRequest, Error: NewAppError("GetFlaggedPostsForUserInTeam", "model.client.get_flagged_posts_in_team.missing_parameter.app_error", nil, "", http.StatusBadRequest)}
+ }
+
+ query := fmt.Sprintf("?team_id=%v&page=%v&per_page=%v", teamId, page, perPage)
+ if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/posts/flagged"+query, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return PostListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetFlaggedPostsForUserInChannel returns flagged posts in channel of a user based on user id string.
+func (c *Client4) GetFlaggedPostsForUserInChannel(userId string, channelId string, page int, perPage int) (*PostList, *Response) {
+ if len(channelId) != 26 {
+ return nil, &Response{StatusCode: http.StatusBadRequest, Error: NewAppError("GetFlaggedPostsForUserInChannel", "model.client.get_flagged_posts_in_channel.missing_parameter.app_error", nil, "", http.StatusBadRequest)}
+ }
+
+ query := fmt.Sprintf("?channel_id=%v&page=%v&per_page=%v", channelId, page, perPage)
+ if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/posts/flagged"+query, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return PostListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetPostsSince gets posts created after a specified time as Unix time in milliseconds.
+func (c *Client4) GetPostsSince(channelId string, time int64) (*PostList, *Response) {
+ query := fmt.Sprintf("?since=%v", time)
+ if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/posts"+query, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return PostListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetPostsAfter gets a page of posts that were posted after the post provided.
+func (c *Client4) GetPostsAfter(channelId, postId string, page, perPage int, etag string) (*PostList, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v&after=%v", page, perPage, postId)
+ if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/posts"+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return PostListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetPostsBefore gets a page of posts that were posted before the post provided.
+func (c *Client4) GetPostsBefore(channelId, postId string, page, perPage int, etag string) (*PostList, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v&before=%v", page, perPage, postId)
+ if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/posts"+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return PostListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// SearchPosts returns any posts with matching terms string.
+func (c *Client4) SearchPosts(teamId string, terms string, isOrSearch bool) (*PostList, *Response) {
+ requestBody := map[string]interface{}{"terms": terms, "is_or_search": isOrSearch}
+ if r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/posts/search", StringInterfaceToJson(requestBody)); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return PostListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// DoPostAction performs a post action.
+func (c *Client4) DoPostAction(postId, actionId string) (bool, *Response) {
+ if r, err := c.DoApiPost(c.GetPostRoute(postId)+"/actions/"+actionId, ""); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// File Section
+
+// UploadFile will upload a file to a channel, to be later attached to a post.
+func (c *Client4) UploadFile(data []byte, channelId string, filename string) (*FileUploadResponse, *Response) {
+ body := &bytes.Buffer{}
+ writer := multipart.NewWriter(body)
+
+ if part, err := writer.CreateFormFile("files", filename); err != nil {
+ return nil, &Response{Error: NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.file.app_error", nil, err.Error(), http.StatusBadRequest)}
+ } else if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil {
+ return nil, &Response{Error: NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.file.app_error", nil, err.Error(), http.StatusBadRequest)}
+ }
+
+ if part, err := writer.CreateFormField("channel_id"); err != nil {
+ return nil, &Response{Error: NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.channel_id.app_error", nil, err.Error(), http.StatusBadRequest)}
+ } else if _, err = io.Copy(part, strings.NewReader(channelId)); err != nil {
+ return nil, &Response{Error: NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.channel_id.app_error", nil, err.Error(), http.StatusBadRequest)}
+ }
+
+ if err := writer.Close(); err != nil {
+ return nil, &Response{Error: NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.writer.app_error", nil, err.Error(), http.StatusBadRequest)}
+ }
+
+ return c.DoUploadFile(c.GetFilesRoute(), body.Bytes(), writer.FormDataContentType())
+}
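+
+// Usage sketch (illustrative only, not part of the upstream source): upload a
+// file and attach it to a new post. It assumes FileUploadResponse exposes a
+// FileInfos slice and Post exposes FileIds, as defined elsewhere in this package.
+//
+//   upload, resp := client.UploadFile(data, channelId, "report.pdf")
+//   if resp.Error != nil {
+//       // handle the error
+//   }
+//   post := &Post{ChannelId: channelId, Message: "see attachment",
+//       FileIds: []string{upload.FileInfos[0].Id}}
+//   post, resp = client.CreatePost(post)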
+
+// GetFile gets the bytes for a file by id.
+func (c *Client4) GetFile(fileId string) ([]byte, *Response) {
+ if r, err := c.DoApiGet(c.GetFileRoute(fileId), ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+
+ if data, err := ioutil.ReadAll(r.Body); err != nil {
+ return nil, BuildErrorResponse(r, NewAppError("GetFile", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode))
+ } else {
+ return data, BuildResponse(r)
+ }
+ }
+}
+
+// DownloadFile gets the bytes for a file by id, optionally adding headers to force the browser to download it
+func (c *Client4) DownloadFile(fileId string, download bool) ([]byte, *Response) {
+ if r, err := c.DoApiGet(c.GetFileRoute(fileId)+fmt.Sprintf("?download=%v", download), ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+
+ if data, err := ioutil.ReadAll(r.Body); err != nil {
+ return nil, BuildErrorResponse(r, NewAppError("DownloadFile", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode))
+ } else {
+ return data, BuildResponse(r)
+ }
+ }
+}
+
+// GetFileThumbnail gets the bytes for the thumbnail of a file by id.
+func (c *Client4) GetFileThumbnail(fileId string) ([]byte, *Response) {
+ if r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/thumbnail", ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+
+ if data, err := ioutil.ReadAll(r.Body); err != nil {
+ return nil, BuildErrorResponse(r, NewAppError("GetFileThumbnail", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode))
+ } else {
+ return data, BuildResponse(r)
+ }
+ }
+}
+
+// DownloadFileThumbnail gets the bytes for a file by id, optionally adding headers to force the browser to download it.
+func (c *Client4) DownloadFileThumbnail(fileId string, download bool) ([]byte, *Response) {
+ if r, err := c.DoApiGet(c.GetFileRoute(fileId)+fmt.Sprintf("/thumbnail?download=%v", download), ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+
+ if data, err := ioutil.ReadAll(r.Body); err != nil {
+ return nil, BuildErrorResponse(r, NewAppError("DownloadFileThumbnail", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode))
+ } else {
+ return data, BuildResponse(r)
+ }
+ }
+}
+
+// GetFileLink gets the public link of a file by id.
+func (c *Client4) GetFileLink(fileId string) (string, *Response) {
+ if r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/link", ""); err != nil {
+ return "", BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+
+ return MapFromJson(r.Body)["link"], BuildResponse(r)
+ }
+}
+
+// GetFilePreview gets the bytes for the preview image of a file by id.
+func (c *Client4) GetFilePreview(fileId string) ([]byte, *Response) {
+ if r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/preview", ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+
+ if data, err := ioutil.ReadAll(r.Body); err != nil {
+ return nil, BuildErrorResponse(r, NewAppError("GetFilePreview", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode))
+ } else {
+ return data, BuildResponse(r)
+ }
+ }
+}
+
+// DownloadFilePreview gets the bytes for the preview image of a file by id, optionally adding headers to force the browser to download it.
+func (c *Client4) DownloadFilePreview(fileId string, download bool) ([]byte, *Response) {
+ if r, err := c.DoApiGet(c.GetFileRoute(fileId)+fmt.Sprintf("/preview?download=%v", download), ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+
+ if data, err := ioutil.ReadAll(r.Body); err != nil {
+ return nil, BuildErrorResponse(r, NewAppError("DownloadFilePreview", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode))
+ } else {
+ return data, BuildResponse(r)
+ }
+ }
+}
+
+// GetFileInfo gets the file info object for a file by id.
+func (c *Client4) GetFileInfo(fileId string) (*FileInfo, *Response) {
+ if r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/info", ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return FileInfoFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetFileInfosForPost gets all the file info objects attached to a post.
+func (c *Client4) GetFileInfosForPost(postId string, etag string) ([]*FileInfo, *Response) {
+ if r, err := c.DoApiGet(c.GetPostRoute(postId)+"/files/info", etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return FileInfosFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// General/System Section
+
+// GetPing will return "ok" if the number of running goroutines is below the configured threshold and "unhealthy" if it is above.
+func (c *Client4) GetPing() (string, *Response) {
+ if r, err := c.DoApiGet(c.GetSystemRoute()+"/ping", ""); r != nil && r.StatusCode == 500 {
+ defer r.Body.Close()
+ return "unhealthy", BuildErrorResponse(r, err)
+ } else if err != nil {
+ return "", BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return MapFromJson(r.Body)["status"], BuildResponse(r)
+ }
+}
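+
+// Usage sketch (illustrative only, not part of the upstream source): a simple
+// health probe against the server.
+//
+//   status, resp := client.GetPing()
+//   if resp.Error != nil || status != "ok" {
+//       // treat the server as unavailable
+//   }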
+
+// TestEmail will attempt to connect to the configured SMTP server.
+func (c *Client4) TestEmail() (bool, *Response) {
+ if r, err := c.DoApiPost(c.GetTestEmailRoute(), ""); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// GetConfig will retrieve the server config with some sanitized items.
+func (c *Client4) GetConfig() (*Config, *Response) {
+ if r, err := c.DoApiGet(c.GetConfigRoute(), ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ConfigFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// ReloadConfig will reload the server configuration.
+func (c *Client4) ReloadConfig() (bool, *Response) {
+ if r, err := c.DoApiPost(c.GetConfigRoute()+"/reload", ""); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// GetOldClientConfig will retrieve the parts of the server configuration needed by the
+// client, formatted in the old format.
+func (c *Client4) GetOldClientConfig(etag string) (map[string]string, *Response) {
+ if r, err := c.DoApiGet(c.GetConfigRoute()+"/client?format=old", etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return MapFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetOldClientLicense will retrieve the parts of the server license needed by the
+// client, formatted in the old format.
+func (c *Client4) GetOldClientLicense(etag string) (map[string]string, *Response) {
+ if r, err := c.DoApiGet(c.GetLicenseRoute()+"/client?format=old", etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return MapFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// DatabaseRecycle will recycle the database connections, discarding the current connections and establishing new ones.
+func (c *Client4) DatabaseRecycle() (bool, *Response) {
+ if r, err := c.DoApiPost(c.GetDatabaseRoute()+"/recycle", ""); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// InvalidateCaches will purge the caches and may affect performance while the cleanup is in progress.
+func (c *Client4) InvalidateCaches() (bool, *Response) {
+ if r, err := c.DoApiPost(c.GetCacheRoute()+"/invalidate", ""); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// UpdateConfig will update the server configuration.
+func (c *Client4) UpdateConfig(config *Config) (*Config, *Response) {
+ if r, err := c.DoApiPut(c.GetConfigRoute(), config.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ConfigFromJson(r.Body), BuildResponse(r)
+ }
+}
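+
+// Usage sketch (illustrative only, not part of the upstream source): the usual
+// read-modify-write pattern for server configuration. The specific setting shown
+// (ServiceSettings.EnableDeveloper) is an assumption about the Config struct,
+// used here purely for demonstration.
+//
+//   cfg, resp := client.GetConfig()
+//   if resp.Error != nil {
+//       // handle the error
+//   }
+//   *cfg.ServiceSettings.EnableDeveloper = true
+//   cfg, resp = client.UpdateConfig(cfg)
+//   _ = cfg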
+
+// UploadLicenseFile will add a license file to the system.
+func (c *Client4) UploadLicenseFile(data []byte) (bool, *Response) {
+ body := &bytes.Buffer{}
+ writer := multipart.NewWriter(body)
+
+ if part, err := writer.CreateFormFile("license", "test-license.mattermost-license"); err != nil {
+ return false, &Response{Error: NewAppError("UploadLicenseFile", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)}
+ } else if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil {
+ return false, &Response{Error: NewAppError("UploadLicenseFile", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)}
+ }
+
+ if err := writer.Close(); err != nil {
+ return false, &Response{Error: NewAppError("UploadLicenseFile", "model.client.set_profile_user.writer.app_error", nil, err.Error(), http.StatusBadRequest)}
+ }
+
+ rq, _ := http.NewRequest("POST", c.ApiUrl+c.GetLicenseRoute(), bytes.NewReader(body.Bytes()))
+ rq.Header.Set("Content-Type", writer.FormDataContentType())
+ rq.Close = true
+
+ if len(c.AuthToken) > 0 {
+ rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken)
+ }
+
+ if rp, err := c.HttpClient.Do(rq); err != nil || rp == nil {
+ return false, &Response{StatusCode: http.StatusForbidden, Error: NewAppError(c.GetLicenseRoute(), "model.client.connecting.app_error", nil, err.Error(), http.StatusForbidden)}
+ } else {
+ defer closeBody(rp)
+
+ if rp.StatusCode >= 300 {
+ return false, BuildErrorResponse(rp, AppErrorFromJson(rp.Body))
+ } else {
+ return CheckStatusOK(rp), BuildResponse(rp)
+ }
+ }
+}
+
+// RemoveLicenseFile will remove the server license if it exists. Note that this will
+// disable all enterprise features.
+func (c *Client4) RemoveLicenseFile() (bool, *Response) {
+ if r, err := c.DoApiDelete(c.GetLicenseRoute()); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// GetAnalyticsOld will retrieve analytics using the old format. New format is not
+// available but the "/analytics" endpoint is reserved for it. The "name" argument is optional
+// and defaults to "standard". The "teamId" argument is optional and will limit results
+// to a specific team.
+func (c *Client4) GetAnalyticsOld(name, teamId string) (AnalyticsRows, *Response) {
+ query := fmt.Sprintf("?name=%v&teamId=%v", name, teamId)
+ if r, err := c.DoApiGet(c.GetAnalyticsRoute()+"/old"+query, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return AnalyticsRowsFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// Webhooks Section
+
+// CreateIncomingWebhook creates an incoming webhook for a channel.
+func (c *Client4) CreateIncomingWebhook(hook *IncomingWebhook) (*IncomingWebhook, *Response) {
+ if r, err := c.DoApiPost(c.GetIncomingWebhooksRoute(), hook.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return IncomingWebhookFromJson(r.Body), BuildResponse(r)
+ }
+}
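+
+// Usage sketch (illustrative only, not part of the upstream source): create an
+// incoming webhook for a channel. It assumes IncomingWebhook exposes ChannelId,
+// DisplayName and Description fields, as defined elsewhere in this package.
+//
+//   hook := &IncomingWebhook{ChannelId: channelId, DisplayName: "builds", Description: "CI notifications"}
+//   hook, resp := client.CreateIncomingWebhook(hook)
+//   if resp.Error != nil {
+//       // handle the error
+//   }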
+
+// UpdateIncomingWebhook updates an incoming webhook for a channel.
+func (c *Client4) UpdateIncomingWebhook(hook *IncomingWebhook) (*IncomingWebhook, *Response) {
+ if r, err := c.DoApiPut(c.GetIncomingWebhookRoute(hook.Id), hook.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return IncomingWebhookFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetIncomingWebhooks returns a page of incoming webhooks on the system. Page counting starts at 0.
+func (c *Client4) GetIncomingWebhooks(page int, perPage int, etag string) ([]*IncomingWebhook, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+ if r, err := c.DoApiGet(c.GetIncomingWebhooksRoute()+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return IncomingWebhookListFromJson(r.Body), BuildResponse(r)
+ }
+}
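+
+// exampleListAllIncomingWebhooks is an editor-added usage sketch, not part of
+// the upstream client: it pages through every incoming webhook on the system
+// using the 0-based page convention documented above, and assumes the client
+// is already authenticated.
+func exampleListAllIncomingWebhooks(c *Client4) ([]*IncomingWebhook, *Response) {
+ perPage := 50
+ all := []*IncomingWebhook{}
+ for page := 0; ; page++ {
+ hooks, resp := c.GetIncomingWebhooks(page, perPage, "")
+ if resp.Error != nil {
+ return nil, resp
+ }
+ all = append(all, hooks...)
+ // A short page means there are no more webhooks to fetch.
+ if len(hooks) < perPage {
+ return all, resp
+ }
+ }
+}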
+
+// GetIncomingWebhooksForTeam returns a page of incoming webhooks for a team. Page counting starts at 0.
+func (c *Client4) GetIncomingWebhooksForTeam(teamId string, page int, perPage int, etag string) ([]*IncomingWebhook, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v&team_id=%v", page, perPage, teamId)
+ if r, err := c.DoApiGet(c.GetIncomingWebhooksRoute()+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return IncomingWebhookListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetIncomingWebhook returns an incoming webhook given the hook ID.
+func (c *Client4) GetIncomingWebhook(hookID string, etag string) (*IncomingWebhook, *Response) {
+ if r, err := c.DoApiGet(c.GetIncomingWebhookRoute(hookID), etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return IncomingWebhookFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// DeleteIncomingWebhook deletes an incoming webhook given the hook ID.
+func (c *Client4) DeleteIncomingWebhook(hookID string) (bool, *Response) {
+ if r, err := c.DoApiDelete(c.GetIncomingWebhookRoute(hookID)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// CreateOutgoingWebhook creates an outgoing webhook for a team or channel.
+func (c *Client4) CreateOutgoingWebhook(hook *OutgoingWebhook) (*OutgoingWebhook, *Response) {
+ if r, err := c.DoApiPost(c.GetOutgoingWebhooksRoute(), hook.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return OutgoingWebhookFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// UpdateOutgoingWebhook updates an outgoing webhook for a team or channel.
+func (c *Client4) UpdateOutgoingWebhook(hook *OutgoingWebhook) (*OutgoingWebhook, *Response) {
+ if r, err := c.DoApiPut(c.GetOutgoingWebhookRoute(hook.Id), hook.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return OutgoingWebhookFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetOutgoingWebhooks returns a page of outgoing webhooks on the system. Page counting starts at 0.
+func (c *Client4) GetOutgoingWebhooks(page int, perPage int, etag string) ([]*OutgoingWebhook, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+ if r, err := c.DoApiGet(c.GetOutgoingWebhooksRoute()+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return OutgoingWebhookListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetOutgoingWebhook returns an outgoing webhook on the system requested by hook ID.
+func (c *Client4) GetOutgoingWebhook(hookId string) (*OutgoingWebhook, *Response) {
+ if r, err := c.DoApiGet(c.GetOutgoingWebhookRoute(hookId), ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return OutgoingWebhookFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetOutgoingWebhooksForChannel returns a page of outgoing webhooks for a channel. Page counting starts at 0.
+func (c *Client4) GetOutgoingWebhooksForChannel(channelId string, page int, perPage int, etag string) ([]*OutgoingWebhook, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v&channel_id=%v", page, perPage, channelId)
+ if r, err := c.DoApiGet(c.GetOutgoingWebhooksRoute()+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return OutgoingWebhookListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetOutgoingWebhooksForTeam returns a page of outgoing webhooks for a team. Page counting starts at 0.
+func (c *Client4) GetOutgoingWebhooksForTeam(teamId string, page int, perPage int, etag string) ([]*OutgoingWebhook, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v&team_id=%v", page, perPage, teamId)
+ if r, err := c.DoApiGet(c.GetOutgoingWebhooksRoute()+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return OutgoingWebhookListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// RegenOutgoingHookToken regenerates the outgoing webhook token.
+func (c *Client4) RegenOutgoingHookToken(hookId string) (*OutgoingWebhook, *Response) {
+ if r, err := c.DoApiPost(c.GetOutgoingWebhookRoute(hookId)+"/regen_token", ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return OutgoingWebhookFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// DeleteOutgoingWebhook deletes the outgoing webhook on the system requested by hook ID.
+func (c *Client4) DeleteOutgoingWebhook(hookId string) (bool, *Response) {
+ if r, err := c.DoApiDelete(c.GetOutgoingWebhookRoute(hookId)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// Preferences Section
+
+// GetPreferences returns the user's preferences.
+func (c *Client4) GetPreferences(userId string) (Preferences, *Response) {
+ if r, err := c.DoApiGet(c.GetPreferencesRoute(userId), ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ preferences, _ := PreferencesFromJson(r.Body)
+ defer closeBody(r)
+ return preferences, BuildResponse(r)
+ }
+}
+
+// UpdatePreferences saves the user's preferences.
+func (c *Client4) UpdatePreferences(userId string, preferences *Preferences) (bool, *Response) {
+ if r, err := c.DoApiPut(c.GetPreferencesRoute(userId), preferences.ToJson()); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return true, BuildResponse(r)
+ }
+}
+
+// DeletePreferences deletes the user's preferences.
+func (c *Client4) DeletePreferences(userId string, preferences *Preferences) (bool, *Response) {
+ if r, err := c.DoApiPost(c.GetPreferencesRoute(userId)+"/delete", preferences.ToJson()); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return true, BuildResponse(r)
+ }
+}
+
+// GetPreferencesByCategory returns the user's preferences from the provided category string.
+func (c *Client4) GetPreferencesByCategory(userId string, category string) (Preferences, *Response) {
+ url := fmt.Sprintf(c.GetPreferencesRoute(userId)+"/%s", category)
+ if r, err := c.DoApiGet(url, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ preferences, _ := PreferencesFromJson(r.Body)
+ defer closeBody(r)
+ return preferences, BuildResponse(r)
+ }
+}
+
+// GetPreferenceByCategoryAndName returns the user's preference from the provided category and preference name strings.
+func (c *Client4) GetPreferenceByCategoryAndName(userId string, category string, preferenceName string) (*Preference, *Response) {
+ url := fmt.Sprintf(c.GetPreferencesRoute(userId)+"/%s/name/%v", category, preferenceName)
+ if r, err := c.DoApiGet(url, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return PreferenceFromJson(r.Body), BuildResponse(r)
+ }
+}
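+
+// exampleSaveDisplayPreference is an editor-added usage sketch, not part of the
+// upstream client: it shows how a single preference is wrapped in a Preferences
+// slice before calling UpdatePreferences. The category and name strings are
+// hypothetical placeholders.
+func exampleSaveDisplayPreference(c *Client4, userId string) (bool, *Response) {
+ preferences := Preferences{
+ {
+ UserId: userId,
+ Category: "display_settings", // hypothetical category
+ Name: "use_military_time", // hypothetical preference name
+ Value: "true",
+ },
+ }
+ return c.UpdatePreferences(userId, &preferences)
+}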
+
+// SAML Section
+
+// GetSamlMetadata returns metadata for the SAML configuration.
+func (c *Client4) GetSamlMetadata() (string, *Response) {
+ if r, err := c.DoApiGet(c.GetSamlRoute()+"/metadata", ""); err != nil {
+ return "", BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(r.Body)
+ return buf.String(), BuildResponse(r)
+ }
+}
+
+func samlFileToMultipart(data []byte, filename string) ([]byte, *multipart.Writer, error) {
+ body := &bytes.Buffer{}
+ writer := multipart.NewWriter(body)
+
+ if part, err := writer.CreateFormFile("certificate", filename); err != nil {
+ return nil, nil, err
+ } else if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil {
+ return nil, nil, err
+ }
+
+ if err := writer.Close(); err != nil {
+ return nil, nil, err
+ }
+
+ return body.Bytes(), writer, nil
+}
+
+// UploadSamlIdpCertificate will upload an IDP certificate for SAML and set the config to use it.
+func (c *Client4) UploadSamlIdpCertificate(data []byte, filename string) (bool, *Response) {
+ body, writer, err := samlFileToMultipart(data, filename)
+ if err != nil {
+ return false, &Response{Error: NewAppError("UploadSamlIdpCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)}
+ }
+
+ _, resp := c.DoUploadFile(c.GetSamlRoute()+"/certificate/idp", body, writer.FormDataContentType())
+ return resp.Error == nil, resp
+}
+
+// UploadSamlPublicCertificate will upload a public certificate for SAML and set the config to use it.
+func (c *Client4) UploadSamlPublicCertificate(data []byte, filename string) (bool, *Response) {
+ body, writer, err := samlFileToMultipart(data, filename)
+ if err != nil {
+ return false, &Response{Error: NewAppError("UploadSamlPublicCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)}
+ }
+
+ _, resp := c.DoUploadFile(c.GetSamlRoute()+"/certificate/public", body, writer.FormDataContentType())
+ return resp.Error == nil, resp
+}
+
+// UploadSamlPrivateCertificate will upload a private key for SAML and set the config to use it.
+func (c *Client4) UploadSamlPrivateCertificate(data []byte, filename string) (bool, *Response) {
+ body, writer, err := samlFileToMultipart(data, filename)
+ if err != nil {
+ return false, &Response{Error: NewAppError("UploadSamlPrivateCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)}
+ }
+
+ _, resp := c.DoUploadFile(c.GetSamlRoute()+"/certificate/private", body, writer.FormDataContentType())
+ return resp.Error == nil, resp
+}
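+
+// exampleUploadIdpCertificateFromDisk is an editor-added usage sketch, not part
+// of the upstream client: it reads an IDP certificate from disk and uploads it
+// with UploadSamlIdpCertificate. The file path is a hypothetical placeholder.
+func exampleUploadIdpCertificateFromDisk(c *Client4) (bool, *Response) {
+ data, err := ioutil.ReadFile("saml-idp.crt") // hypothetical path
+ if err != nil {
+ return false, &Response{Error: NewAppError("exampleUploadIdpCertificateFromDisk", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)}
+ }
+ return c.UploadSamlIdpCertificate(data, "saml-idp.crt")
+}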
+
+// DeleteSamlIdpCertificate deletes the SAML IDP certificate from the server and updates the config to not use it and disable SAML.
+func (c *Client4) DeleteSamlIdpCertificate() (bool, *Response) {
+ if r, err := c.DoApiDelete(c.GetSamlRoute() + "/certificate/idp"); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// DeleteSamlPublicCertificate deletes the SAML public certificate from the server and updates the config to not use it and disable SAML.
+func (c *Client4) DeleteSamlPublicCertificate() (bool, *Response) {
+ if r, err := c.DoApiDelete(c.GetSamlRoute() + "/certificate/public"); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// DeleteSamlPrivateCertificate deletes the SAML private key from the server and updates the config to not use it and disable SAML.
+func (c *Client4) DeleteSamlPrivateCertificate() (bool, *Response) {
+ if r, err := c.DoApiDelete(c.GetSamlRoute() + "/certificate/private"); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// GetSamlCertificateStatus returns the status of the certificates in the SAML configuration.
+func (c *Client4) GetSamlCertificateStatus() (*SamlCertificateStatus, *Response) {
+ if r, err := c.DoApiGet(c.GetSamlRoute()+"/certificate/status", ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return SamlCertificateStatusFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// Compliance Section
+
+// CreateComplianceReport creates a compliance report.
+func (c *Client4) CreateComplianceReport(report *Compliance) (*Compliance, *Response) {
+ if r, err := c.DoApiPost(c.GetComplianceReportsRoute(), report.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ComplianceFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetComplianceReports returns a page of compliance reports.
+func (c *Client4) GetComplianceReports(page, perPage int) (Compliances, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+ if r, err := c.DoApiGet(c.GetComplianceReportsRoute()+query, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CompliancesFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetComplianceReport returns a compliance report.
+func (c *Client4) GetComplianceReport(reportId string) (*Compliance, *Response) {
+ if r, err := c.DoApiGet(c.GetComplianceReportRoute(reportId), ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ComplianceFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// DownloadComplianceReport returns a full compliance report as a file.
+func (c *Client4) DownloadComplianceReport(reportId string) ([]byte, *Response) {
+ var rq *http.Request
+ rq, _ = http.NewRequest("GET", c.ApiUrl+c.GetComplianceReportRoute(reportId), nil)
+ rq.Close = true
+
+ if len(c.AuthToken) > 0 {
+ rq.Header.Set(HEADER_AUTH, "BEARER "+c.AuthToken)
+ }
+
+ if rp, err := c.HttpClient.Do(rq); err != nil || rp == nil {
+ return nil, &Response{Error: NewAppError("DownloadComplianceReport", "model.client.connecting.app_error", nil, err.Error(), http.StatusBadRequest)}
+ } else {
+ defer closeBody(rp)
+
+ if rp.StatusCode >= 300 {
+ return nil, BuildErrorResponse(rp, AppErrorFromJson(rp.Body))
+ } else if data, err := ioutil.ReadAll(rp.Body); err != nil {
+ return nil, BuildErrorResponse(rp, NewAppError("DownloadComplianceReport", "model.client.read_file.app_error", nil, err.Error(), rp.StatusCode))
+ } else {
+ return data, BuildResponse(rp)
+ }
+ }
+}
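+
+// exampleRunAdhocComplianceExport is an editor-added usage sketch, not part of
+// the upstream client: it creates an ad hoc compliance report for a time range
+// and downloads the generated file. The description is a hypothetical
+// placeholder, and in practice the report must finish running on the server
+// before the download succeeds.
+func exampleRunAdhocComplianceExport(c *Client4, startAt, endAt int64) ([]byte, *Response) {
+ report, resp := c.CreateComplianceReport(&Compliance{
+ Desc: "quarterly-export", // hypothetical description
+ Type: COMPLIANCE_TYPE_ADHOC,
+ StartAt: startAt,
+ EndAt: endAt,
+ })
+ if resp.Error != nil {
+ return nil, resp
+ }
+ return c.DownloadComplianceReport(report.Id)
+}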
+
+// Cluster Section
+
+// GetClusterStatus returns the status of all the configured cluster nodes.
+func (c *Client4) GetClusterStatus() ([]*ClusterInfo, *Response) {
+ if r, err := c.DoApiGet(c.GetClusterRoute()+"/status", ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ClusterInfosFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// LDAP Section
+
+// SyncLdap will force a sync with the configured LDAP server.
+func (c *Client4) SyncLdap() (bool, *Response) {
+ if r, err := c.DoApiPost(c.GetLdapRoute()+"/sync", ""); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// TestLdap will attempt to connect to the configured LDAP server and return OK if configured
+// correctly.
+func (c *Client4) TestLdap() (bool, *Response) {
+ if r, err := c.DoApiPost(c.GetLdapRoute()+"/test", ""); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// Audits Section
+
+// GetAudits returns a list of audits for the whole system.
+func (c *Client4) GetAudits(page int, perPage int, etag string) (Audits, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+ if r, err := c.DoApiGet("/audits"+query, etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return AuditsFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// Brand Section
+
+// GetBrandImage retrieves the previously uploaded brand image.
+func (c *Client4) GetBrandImage() ([]byte, *Response) {
+ if r, err := c.DoApiGet(c.GetBrandRoute()+"/image", ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+
+ if r.StatusCode >= 300 {
+ return nil, BuildErrorResponse(r, AppErrorFromJson(r.Body))
+ } else if data, err := ioutil.ReadAll(r.Body); err != nil {
+ return nil, BuildErrorResponse(r, NewAppError("GetBrandImage", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode))
+ } else {
+ return data, BuildResponse(r)
+ }
+ }
+}
+
+// UploadBrandImage sets the brand image for the system.
+func (c *Client4) UploadBrandImage(data []byte) (bool, *Response) {
+ body := &bytes.Buffer{}
+ writer := multipart.NewWriter(body)
+
+ if part, err := writer.CreateFormFile("image", "brand.png"); err != nil {
+ return false, &Response{Error: NewAppError("UploadBrandImage", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)}
+ } else if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil {
+ return false, &Response{Error: NewAppError("UploadBrandImage", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)}
+ }
+
+ if err := writer.Close(); err != nil {
+ return false, &Response{Error: NewAppError("UploadBrandImage", "model.client.set_profile_user.writer.app_error", nil, err.Error(), http.StatusBadRequest)}
+ }
+
+ rq, _ := http.NewRequest("POST", c.ApiUrl+c.GetBrandRoute()+"/image", bytes.NewReader(body.Bytes()))
+ rq.Header.Set("Content-Type", writer.FormDataContentType())
+ rq.Close = true
+
+ if len(c.AuthToken) > 0 {
+ rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken)
+ }
+
+ if rp, err := c.HttpClient.Do(rq); err != nil || rp == nil {
+ return false, &Response{StatusCode: http.StatusForbidden, Error: NewAppError(c.GetBrandRoute()+"/image", "model.client.connecting.app_error", nil, err.Error(), http.StatusForbidden)}
+ } else {
+ defer closeBody(rp)
+
+ if rp.StatusCode >= 300 {
+ return false, BuildErrorResponse(rp, AppErrorFromJson(rp.Body))
+ } else {
+ return CheckStatusOK(rp), BuildResponse(rp)
+ }
+ }
+}
+
+// Logs Section
+
+// GetLogs returns a page of logs as a string array.
+func (c *Client4) GetLogs(page, perPage int) ([]string, *Response) {
+ query := fmt.Sprintf("?page=%v&logs_per_page=%v", page, perPage)
+ if r, err := c.DoApiGet("/logs"+query, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ArrayFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// PostLog is a convenience web service call so clients can log messages into
+// the server-side logs. For example, web clients typically log JavaScript error
+// messages into the server-side logs. It returns the log message if the logging
+// was successful.
+func (c *Client4) PostLog(message map[string]string) (map[string]string, *Response) {
+ if r, err := c.DoApiPost("/logs", MapToJson(message)); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return MapFromJson(r.Body), BuildResponse(r)
+ }
+}
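+
+// exampleLogClientError is an editor-added usage sketch, not part of the
+// upstream client: it forwards a client-side error message to the server logs
+// via PostLog. The "level" and "message" keys are assumptions about the
+// expected payload.
+func exampleLogClientError(c *Client4, message string) (map[string]string, *Response) {
+ return c.PostLog(map[string]string{
+ "level": "ERROR",
+ "message": message,
+ })
+}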
+
+// OAuth Section
+
+// CreateOAuthApp will register a new OAuth 2.0 client application with Mattermost acting as an OAuth 2.0 service provider.
+func (c *Client4) CreateOAuthApp(app *OAuthApp) (*OAuthApp, *Response) {
+ if r, err := c.DoApiPost(c.GetOAuthAppsRoute(), app.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return OAuthAppFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// UpdateOAuthApp updates a registered OAuth 2.0 client application.
+func (c *Client4) UpdateOAuthApp(app *OAuthApp) (*OAuthApp, *Response) {
+ if r, err := c.DoApiPut(c.GetOAuthAppRoute(app.Id), app.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return OAuthAppFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetOAuthApps gets a page of registered OAuth 2.0 client applications with Mattermost acting as an OAuth 2.0 service provider.
+func (c *Client4) GetOAuthApps(page, perPage int) ([]*OAuthApp, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+ if r, err := c.DoApiGet(c.GetOAuthAppsRoute()+query, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return OAuthAppListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetOAuthApp gets a registered OAuth 2.0 client application with Mattermost acting as an OAuth 2.0 service provider.
+func (c *Client4) GetOAuthApp(appId string) (*OAuthApp, *Response) {
+ if r, err := c.DoApiGet(c.GetOAuthAppRoute(appId), ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return OAuthAppFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetOAuthAppInfo gets a sanitized version of a registered OAuth 2.0 client application with Mattermost acting as an OAuth 2.0 service provider.
+func (c *Client4) GetOAuthAppInfo(appId string) (*OAuthApp, *Response) {
+ if r, err := c.DoApiGet(c.GetOAuthAppRoute(appId)+"/info", ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return OAuthAppFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// DeleteOAuthApp deletes a registered OAuth 2.0 client application.
+func (c *Client4) DeleteOAuthApp(appId string) (bool, *Response) {
+ if r, err := c.DoApiDelete(c.GetOAuthAppRoute(appId)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// RegenerateOAuthAppSecret regenerates the client secret for a registered OAuth 2.0 client application.
+func (c *Client4) RegenerateOAuthAppSecret(appId string) (*OAuthApp, *Response) {
+ if r, err := c.DoApiPost(c.GetOAuthAppRoute(appId)+"/regen_secret", ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return OAuthAppFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetAuthorizedOAuthAppsForUser gets a page of OAuth 2.0 client applications the user has authorized to access their account.
+func (c *Client4) GetAuthorizedOAuthAppsForUser(userId string, page, perPage int) ([]*OAuthApp, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+ if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/oauth/apps/authorized"+query, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return OAuthAppListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// AuthorizeOAuthApp will authorize an OAuth 2.0 client application to access a user's account and provide a redirect link to follow.
+func (c *Client4) AuthorizeOAuthApp(authRequest *AuthorizeRequest) (string, *Response) {
+ if r, err := c.DoApiRequest(http.MethodPost, c.Url+"/oauth/authorize", authRequest.ToJson(), ""); err != nil {
+ return "", BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return MapFromJson(r.Body)["redirect"], BuildResponse(r)
+ }
+}
+
+// DeauthorizeOAuthApp will deauthorize an OAuth 2.0 client application from accessing a user's account.
+func (c *Client4) DeauthorizeOAuthApp(appId string) (bool, *Response) {
+ requestData := map[string]string{"client_id": appId}
+ if r, err := c.DoApiRequest(http.MethodPost, c.Url+"/oauth/deauthorize", MapToJson(requestData), ""); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
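+
+// exampleRegisterOAuthApp is an editor-added usage sketch, not part of the
+// upstream client: it registers a new OAuth 2.0 client application. The name,
+// homepage and callback URL are hypothetical placeholders, and the OAuthApp
+// fields used here are assumed from elsewhere in this package.
+func exampleRegisterOAuthApp(c *Client4) (*OAuthApp, *Response) {
+ app := &OAuthApp{
+ Name: "My Integration", // hypothetical app name
+ Description: "Example OAuth 2.0 client",
+ Homepage: "https://integration.example.com",
+ CallbackUrls: []string{"https://integration.example.com/callback"},
+ }
+ return c.CreateOAuthApp(app)
+}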
+
+// Elasticsearch Section
+
+// TestElasticsearch will attempt to connect to the configured Elasticsearch server and return OK if configured
+// correctly.
+func (c *Client4) TestElasticsearch() (bool, *Response) {
+ if r, err := c.DoApiPost(c.GetElasticsearchRoute()+"/test", ""); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// PurgeElasticsearchIndexes immediately deletes all Elasticsearch indexes.
+func (c *Client4) PurgeElasticsearchIndexes() (bool, *Response) {
+ if r, err := c.DoApiPost(c.GetElasticsearchRoute()+"/purge_indexes", ""); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// Data Retention Section
+
+// GetDataRetentionPolicy will get the current server data retention policy details.
+func (c *Client4) GetDataRetentionPolicy() (*DataRetentionPolicy, *Response) {
+ if r, err := c.DoApiGet(c.GetDataRetentionRoute()+"/policy", ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return DataRetentionPolicyFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// Commands Section
+
+// CreateCommand will create a new command if the user has the right permissions.
+func (c *Client4) CreateCommand(cmd *Command) (*Command, *Response) {
+ if r, err := c.DoApiPost(c.GetCommandsRoute(), cmd.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CommandFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// UpdateCommand updates a command based on the provided Command struct
+func (c *Client4) UpdateCommand(cmd *Command) (*Command, *Response) {
+ if r, err := c.DoApiPut(c.GetCommandRoute(cmd.Id), cmd.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CommandFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// DeleteCommand deletes a command based on the provided command id string
+func (c *Client4) DeleteCommand(commandId string) (bool, *Response) {
+ if r, err := c.DoApiDelete(c.GetCommandRoute(commandId)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// ListCommands will retrieve a list of commands available in the team.
+func (c *Client4) ListCommands(teamId string, customOnly bool) ([]*Command, *Response) {
+ query := fmt.Sprintf("?team_id=%v&custom_only=%v", teamId, customOnly)
+ if r, err := c.DoApiGet(c.GetCommandsRoute()+query, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CommandListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// ExecuteCommand executes a given slash command.
+func (c *Client4) ExecuteCommand(channelId, command string) (*CommandResponse, *Response) {
+ commandArgs := &CommandArgs{
+ ChannelId: channelId,
+ Command: command,
+ }
+ if r, err := c.DoApiPost(c.GetCommandsRoute()+"/execute", commandArgs.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CommandResponseFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// ExecuteCommandWithTeam executes a given slash command against the specified team.
+// Use this when executing slash commands in a DM/GM, since the team id cannot be inferred in that case.
+func (c *Client4) ExecuteCommandWithTeam(channelId, teamId, command string) (*CommandResponse, *Response) {
+ commandArgs := &CommandArgs{
+ ChannelId: channelId,
+ TeamId: teamId,
+ Command: command,
+ }
+ if r, err := c.DoApiPost(c.GetCommandsRoute()+"/execute", commandArgs.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CommandResponseFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// ListAutocompleteCommands will retrieve a list of autocomplete commands available in the team.
+func (c *Client4) ListAutocompleteCommands(teamId string) ([]*Command, *Response) {
+ if r, err := c.DoApiGet(c.GetTeamAutoCompleteCommandsRoute(teamId), ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CommandListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// RegenCommandToken will create a new token if the user has the right permissions.
+func (c *Client4) RegenCommandToken(commandId string) (string, *Response) {
+ if r, err := c.DoApiPut(c.GetCommandRoute(commandId)+"/regen_token", ""); err != nil {
+ return "", BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return MapFromJson(r.Body)["token"], BuildResponse(r)
+ }
+}
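+
+// exampleCreateAndRunCommand is an editor-added usage sketch, not part of the
+// upstream client: it registers a custom slash command for a team and then
+// executes it in a channel. The trigger and URL are hypothetical placeholders;
+// the Command fields and COMMAND_METHOD_POST come from the model package.
+func exampleCreateAndRunCommand(c *Client4, teamId, channelId string) (*CommandResponse, *Response) {
+ if _, resp := c.CreateCommand(&Command{
+ TeamId: teamId,
+ Trigger: "weather", // hypothetical trigger
+ Method: COMMAND_METHOD_POST,
+ URL: "https://hooks.example.com/weather", // hypothetical URL
+ }); resp.Error != nil {
+ return nil, resp
+ }
+ return c.ExecuteCommand(channelId, "/weather today")
+}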
+
+// Status Section
+
+// GetUserStatus returns a user based on the provided user id string.
+func (c *Client4) GetUserStatus(userId, etag string) (*Status, *Response) {
+ if r, err := c.DoApiGet(c.GetUserStatusRoute(userId), etag); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return StatusFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetUsersStatusesByIds returns a list of user statuses based on the provided user IDs.
+func (c *Client4) GetUsersStatusesByIds(userIds []string) ([]*Status, *Response) {
+ if r, err := c.DoApiPost(c.GetUserStatusesRoute()+"/ids", ArrayToJson(userIds)); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return StatusListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// UpdateUserStatus sets a user's status based on the provided user id string.
+func (c *Client4) UpdateUserStatus(userId string, userStatus *Status) (*Status, *Response) {
+ if r, err := c.DoApiPut(c.GetUserStatusRoute(userId), userStatus.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return StatusFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// Webrtc Section
+
+// GetWebrtcToken returns a valid token, stun server and turn server with credentials to
+// use with the Mattermost WebRTC service.
+func (c *Client4) GetWebrtcToken() (*WebrtcInfoResponse, *Response) {
+ if r, err := c.DoApiGet("/webrtc/token", ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return WebrtcInfoResponseFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// Emoji Section
+
+// CreateEmoji will save an emoji to the server if the current user has permission
+// to do so. If successful, the provided emoji will be returned with its Id field
+// filled in. Otherwise, an error will be returned.
+func (c *Client4) CreateEmoji(emoji *Emoji, image []byte, filename string) (*Emoji, *Response) {
+ body := &bytes.Buffer{}
+ writer := multipart.NewWriter(body)
+
+ if part, err := writer.CreateFormFile("image", filename); err != nil {
+ return nil, &Response{StatusCode: http.StatusForbidden, Error: NewAppError("CreateEmoji", "model.client.create_emoji.image.app_error", nil, err.Error(), 0)}
+ } else if _, err = io.Copy(part, bytes.NewBuffer(image)); err != nil {
+ return nil, &Response{StatusCode: http.StatusForbidden, Error: NewAppError("CreateEmoji", "model.client.create_emoji.image.app_error", nil, err.Error(), 0)}
+ }
+
+ if err := writer.WriteField("emoji", emoji.ToJson()); err != nil {
+ return nil, &Response{StatusCode: http.StatusForbidden, Error: NewAppError("CreateEmoji", "model.client.create_emoji.emoji.app_error", nil, err.Error(), 0)}
+ }
+
+ if err := writer.Close(); err != nil {
+ return nil, &Response{StatusCode: http.StatusForbidden, Error: NewAppError("CreateEmoji", "model.client.create_emoji.writer.app_error", nil, err.Error(), 0)}
+ }
+
+ return c.DoEmojiUploadFile(c.GetEmojisRoute(), body.Bytes(), writer.FormDataContentType())
+}
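+
+// exampleCreateCustomEmoji is an editor-added usage sketch, not part of the
+// upstream client: it registers a custom emoji from image bytes already read
+// by the caller. The emoji name and filename are hypothetical placeholders.
+func exampleCreateCustomEmoji(c *Client4, userId string, image []byte) (*Emoji, *Response) {
+ emoji := &Emoji{
+ CreatorId: userId,
+ Name: "party_gopher", // hypothetical emoji name
+ }
+ return c.CreateEmoji(emoji, image, "party_gopher.png")
+}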
+
+// GetEmojiList returns a page of custom emoji on the system.
+func (c *Client4) GetEmojiList(page, perPage int) ([]*Emoji, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+ if r, err := c.DoApiGet(c.GetEmojisRoute()+query, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return EmojiListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetSortedEmojiList returns a page of custom emoji on the system sorted based on the sort
+// parameter, blank for no sorting and "name" to sort by emoji names.
+func (c *Client4) GetSortedEmojiList(page, perPage int, sort string) ([]*Emoji, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v&sort=%v", page, perPage, sort)
+ if r, err := c.DoApiGet(c.GetEmojisRoute()+query, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return EmojiListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// DeleteEmoji deletes a custom emoji based on the provided emoji ID string.
+func (c *Client4) DeleteEmoji(emojiId string) (bool, *Response) {
+ if r, err := c.DoApiDelete(c.GetEmojiRoute(emojiId)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// GetEmoji returns a custom emoji based on the emojiId string.
+func (c *Client4) GetEmoji(emojiId string) (*Emoji, *Response) {
+ if r, err := c.DoApiGet(c.GetEmojiRoute(emojiId), ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return EmojiFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetEmojiByName returns a custom emoji based on the name string.
+func (c *Client4) GetEmojiByName(name string) (*Emoji, *Response) {
+ if r, err := c.DoApiGet(c.GetEmojiByNameRoute(name), ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return EmojiFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetEmojiImage returns the emoji image.
+func (c *Client4) GetEmojiImage(emojiId string) ([]byte, *Response) {
+ if r, err := c.DoApiGet(c.GetEmojiRoute(emojiId)+"/image", ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+
+ if data, err := ioutil.ReadAll(r.Body); err != nil {
+ return nil, BuildErrorResponse(r, NewAppError("GetEmojiImage", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode))
+ } else {
+ return data, BuildResponse(r)
+ }
+ }
+}
+
+// SearchEmoji returns a list of emoji matching some search criteria.
+func (c *Client4) SearchEmoji(search *EmojiSearch) ([]*Emoji, *Response) {
+ if r, err := c.DoApiPost(c.GetEmojisRoute()+"/search", search.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return EmojiListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// AutocompleteEmoji returns a list of emoji starting with or matching name.
+func (c *Client4) AutocompleteEmoji(name string, etag string) ([]*Emoji, *Response) {
+ query := fmt.Sprintf("?name=%v", name)
+ if r, err := c.DoApiGet(c.GetEmojisRoute()+"/autocomplete"+query, ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return EmojiListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// Reaction Section
+
+// SaveReaction saves an emoji reaction for a post. Returns the saved reaction if successful, otherwise an error will be returned.
+func (c *Client4) SaveReaction(reaction *Reaction) (*Reaction, *Response) {
+ if r, err := c.DoApiPost(c.GetReactionsRoute(), reaction.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ReactionFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetReactions returns a list of reactions to a post.
+func (c *Client4) GetReactions(postId string) ([]*Reaction, *Response) {
+ if r, err := c.DoApiGet(c.GetPostRoute(postId)+"/reactions", ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ReactionsFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// DeleteReaction deletes a user's reaction to a post.
+func (c *Client4) DeleteReaction(reaction *Reaction) (bool, *Response) {
+ if r, err := c.DoApiDelete(c.GetUserRoute(reaction.UserId) + c.GetPostRoute(reaction.PostId) + fmt.Sprintf("/reactions/%v", reaction.EmojiName)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
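+
+// exampleReactToPost is an editor-added usage sketch, not part of the upstream
+// client: it adds a "+1" reaction to a post on behalf of a user, using the
+// Reaction fields assumed from elsewhere in this package.
+func exampleReactToPost(c *Client4, userId, postId string) (*Reaction, *Response) {
+ return c.SaveReaction(&Reaction{
+ UserId: userId,
+ PostId: postId,
+ EmojiName: "+1",
+ })
+}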
+
+// Open Graph Metadata Section
+
+// OpenGraph returns the Open Graph metadata for a particular URL if the site has the metadata.
+func (c *Client4) OpenGraph(url string) (map[string]string, *Response) {
+ requestBody := make(map[string]string)
+ requestBody["url"] = url
+
+ if r, err := c.DoApiPost(c.GetOpenGraphRoute(), MapToJson(requestBody)); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return MapFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// Jobs Section
+
+// GetJob gets a single job.
+func (c *Client4) GetJob(id string) (*Job, *Response) {
+ if r, err := c.DoApiGet(c.GetJobsRoute()+fmt.Sprintf("/%v", id), ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return JobFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetJobs gets a page of jobs, sorted with the most recently created job first.
+func (c *Client4) GetJobs(page int, perPage int) ([]*Job, *Response) {
+ if r, err := c.DoApiGet(c.GetJobsRoute()+fmt.Sprintf("?page=%v&per_page=%v", page, perPage), ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return JobsFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// GetJobsByType gets a page of jobs of the given type, sorted with the most recently created job first.
+func (c *Client4) GetJobsByType(jobType string, page int, perPage int) ([]*Job, *Response) {
+ if r, err := c.DoApiGet(c.GetJobsRoute()+fmt.Sprintf("/type/%v?page=%v&per_page=%v", jobType, page, perPage), ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return JobsFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// CreateJob creates a job based on the provided job struct.
+func (c *Client4) CreateJob(job *Job) (*Job, *Response) {
+ if r, err := c.DoApiPost(c.GetJobsRoute(), job.ToJson()); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return JobFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// CancelJob requests the cancellation of the job with the provided Id.
+func (c *Client4) CancelJob(jobId string) (bool, *Response) {
+ if r, err := c.DoApiPost(c.GetJobsRoute()+fmt.Sprintf("/%v/cancel", jobId), ""); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
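+
+// exampleScheduleJob is an editor-added usage sketch, not part of the upstream
+// client: it creates a server-side job and reads it back by id. The job type
+// string is a hypothetical placeholder; valid types are defined by the server.
+func exampleScheduleJob(c *Client4) (*Job, *Response) {
+ job, resp := c.CreateJob(&Job{Type: "data_retention"}) // hypothetical job type
+ if resp.Error != nil {
+ return nil, resp
+ }
+ return c.GetJob(job.Id)
+}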
+
+// Plugin Section
+
+// UploadPlugin takes an io.Reader stream pointing to the contents of a .tar.gz plugin.
+// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE.
+func (c *Client4) UploadPlugin(file io.Reader) (*Manifest, *Response) {
+ body := new(bytes.Buffer)
+ writer := multipart.NewWriter(body)
+
+ if part, err := writer.CreateFormFile("plugin", "plugin.tar.gz"); err != nil {
+ return nil, &Response{Error: NewAppError("UploadPlugin", "model.client.writer.app_error", nil, err.Error(), 0)}
+ } else if _, err = io.Copy(part, file); err != nil {
+ return nil, &Response{Error: NewAppError("UploadPlugin", "model.client.writer.app_error", nil, err.Error(), 0)}
+ }
+
+ if err := writer.Close(); err != nil {
+ return nil, &Response{Error: NewAppError("UploadPlugin", "model.client.writer.app_error", nil, err.Error(), 0)}
+ }
+
+ rq, _ := http.NewRequest("POST", c.ApiUrl+c.GetPluginsRoute(), body)
+ rq.Header.Set("Content-Type", writer.FormDataContentType())
+ rq.Close = true
+
+ if len(c.AuthToken) > 0 {
+ rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken)
+ }
+
+ if rp, err := c.HttpClient.Do(rq); err != nil || rp == nil {
+ return nil, BuildErrorResponse(rp, NewAppError("UploadPlugin", "model.client.connecting.app_error", nil, err.Error(), 0))
+ } else {
+ defer closeBody(rp)
+
+ if rp.StatusCode >= 300 {
+ return nil, BuildErrorResponse(rp, AppErrorFromJson(rp.Body))
+ } else {
+ return ManifestFromJson(rp.Body), BuildResponse(rp)
+ }
+ }
+}
+
+// GetPlugins will return a list of plugin manifests for currently active plugins.
+// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE.
+func (c *Client4) GetPlugins() (*PluginsResponse, *Response) {
+ if r, err := c.DoApiGet(c.GetPluginsRoute(), ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return PluginsResponseFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// RemovePlugin will deactivate and delete a plugin.
+// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE.
+func (c *Client4) RemovePlugin(id string) (bool, *Response) {
+ if r, err := c.DoApiDelete(c.GetPluginRoute(id)); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// GetWebappPlugins will return a list of plugins that the webapp should download.
+// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE.
+func (c *Client4) GetWebappPlugins() ([]*Manifest, *Response) {
+ if r, err := c.DoApiGet(c.GetPluginsRoute()+"/webapp", ""); err != nil {
+ return nil, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return ManifestListFromJson(r.Body), BuildResponse(r)
+ }
+}
+
+// ActivatePlugin will activate an installed plugin.
+// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE.
+func (c *Client4) ActivatePlugin(id string) (bool, *Response) {
+ if r, err := c.DoApiPost(c.GetPluginRoute(id)+"/activate", ""); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
+
+// DeactivatePlugin will deactivate an active plugin.
+// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE.
+func (c *Client4) DeactivatePlugin(id string) (bool, *Response) {
+ if r, err := c.DoApiPost(c.GetPluginRoute(id)+"/deactivate", ""); err != nil {
+ return false, BuildErrorResponse(r, err)
+ } else {
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+ }
+}
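+
+// exampleInstallAndActivatePlugin is an editor-added usage sketch, not part of
+// the upstream client: it uploads a .tar.gz plugin bundle from an io.Reader and
+// activates it using the manifest id returned by the server. Plugins are still
+// experimental, so this flow carries the same caveats as the calls above.
+func exampleInstallAndActivatePlugin(c *Client4, bundle io.Reader) (bool, *Response) {
+ manifest, resp := c.UploadPlugin(bundle)
+ if resp.Error != nil {
+ return false, resp
+ }
+ return c.ActivatePlugin(manifest.Id)
+}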
diff --git a/vendor/github.com/mattermost/mattermost-server/model/cluster_discovery.go b/vendor/github.com/mattermost/mattermost-server/model/cluster_discovery.go
new file mode 100644
index 00000000..89e5fc95
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/cluster_discovery.go
@@ -0,0 +1,133 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "os"
+)
+
+const (
+ CDS_OFFLINE_AFTER_MILLIS = 1000 * 60 * 30 // 30 minutes
+ CDS_TYPE_APP = "mattermost_app"
+)
+
+type ClusterDiscovery struct {
+ Id string `json:"id"`
+ Type string `json:"type"`
+ ClusterName string `json:"cluster_name"`
+ Hostname string `json:"hostname"`
+ GossipPort int32 `json:"gossip_port"`
+ Port int32 `json:"port"`
+ CreateAt int64 `json:"create_at"`
+ LastPingAt int64 `json:"last_ping_at"`
+}
+
+func (o *ClusterDiscovery) PreSave() {
+ if o.Id == "" {
+ o.Id = NewId()
+ }
+
+ if o.CreateAt == 0 {
+ o.CreateAt = GetMillis()
+ o.LastPingAt = o.CreateAt
+ }
+}
+
+func (o *ClusterDiscovery) AutoFillHostname() {
+ // attempt to set the hostname from the OS
+ if len(o.Hostname) == 0 {
+ if hn, err := os.Hostname(); err == nil {
+ o.Hostname = hn
+ }
+ }
+}
+
+func (o *ClusterDiscovery) AutoFillIpAddress() {
+ // attempt to set the hostname to the first non-local IP address
+ if len(o.Hostname) == 0 {
+ o.Hostname = GetServerIpAddress()
+ }
+}
+
+func (o *ClusterDiscovery) IsEqual(in *ClusterDiscovery) bool {
+ if in == nil {
+ return false
+ }
+
+ if o.Type != in.Type {
+ return false
+ }
+
+ if o.ClusterName != in.ClusterName {
+ return false
+ }
+
+ if o.Hostname != in.Hostname {
+ return false
+ }
+
+ return true
+}
+
+func FilterClusterDiscovery(vs []*ClusterDiscovery, f func(*ClusterDiscovery) bool) []*ClusterDiscovery {
+ filtered := make([]*ClusterDiscovery, 0)
+ for _, v := range vs {
+ if f(v) {
+ filtered = append(filtered, v)
+ }
+ }
+
+ return filtered
+}
+
+func (o *ClusterDiscovery) IsValid() *AppError {
+ if len(o.Id) != 26 {
+ return NewAppError("Channel.IsValid", "model.channel.is_valid.id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.ClusterName) == 0 {
+ return NewAppError("ClusterDiscovery.IsValid", "ClusterName must be set", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.Type) == 0 {
+ return NewAppError("ClusterDiscovery.IsValid", "Type must be set", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.Hostname) == 0 {
+ return NewAppError("ClusterDiscovery.IsValid", "Hostname must be set", nil, "", http.StatusBadRequest)
+ }
+
+ if o.CreateAt == 0 {
+ return NewAppError("ClusterDiscovery.IsValid", "CreateAt must be set", nil, "", http.StatusBadRequest)
+ }
+
+ if o.LastPingAt == 0 {
+ return NewAppError("ClusterDiscovery.IsValid", "LastPingAt must be set", nil, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (o *ClusterDiscovery) ToJson() string {
+ b, err := json.Marshal(o)
+ if err != nil {
+ return ""
+ }
+
+ return string(b)
+}
+
+func ClusterDiscoveryFromJson(data io.Reader) *ClusterDiscovery {
+ decoder := json.NewDecoder(data)
+ var me ClusterDiscovery
+ err := decoder.Decode(&me)
+ if err == nil {
+ return &me
+ }
+
+ return nil
+}
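+
+// exampleRegisterClusterNode is an editor-added usage sketch, not part of the
+// upstream model: it shows the PreSave/AutoFillHostname/IsValid flow a server
+// might run before persisting a discovery record. The cluster name and ports
+// are hypothetical, and IsValid still fails if no hostname could be resolved.
+func exampleRegisterClusterNode() (*ClusterDiscovery, *AppError) {
+ cd := &ClusterDiscovery{
+ Type: CDS_TYPE_APP,
+ ClusterName: "production", // hypothetical cluster name
+ GossipPort: 8074,
+ Port: 8075,
+ }
+ cd.PreSave()
+ cd.AutoFillHostname()
+ if err := cd.IsValid(); err != nil {
+ return nil, err
+ }
+ return cd, nil
+}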
diff --git a/vendor/github.com/mattermost/mattermost-server/model/cluster_info.go b/vendor/github.com/mattermost/mattermost-server/model/cluster_info.go
new file mode 100644
index 00000000..a8d63ec3
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/cluster_info.go
@@ -0,0 +1,50 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "strings"
+)
+
+type ClusterInfo struct {
+ Id string `json:"id"`
+ Version string `json:"version"`
+ ConfigHash string `json:"config_hash"`
+ IpAddress string `json:"ipaddress"`
+ Hostname string `json:"hostname"`
+}
+
+func (me *ClusterInfo) ToJson() string {
+ b, _ := json.Marshal(me)
+ return string(b)
+}
+
+func (me *ClusterInfo) Copy() *ClusterInfo {
+ json := me.ToJson()
+ return ClusterInfoFromJson(strings.NewReader(json))
+}
+
+func ClusterInfoFromJson(data io.Reader) *ClusterInfo {
+ var me *ClusterInfo
+ json.NewDecoder(data).Decode(&me)
+ return me
+}
+
+func ClusterInfosToJson(objmap []*ClusterInfo) string {
+ b, _ := json.Marshal(objmap)
+ return string(b)
+}
+
+func ClusterInfosFromJson(data io.Reader) []*ClusterInfo {
+ decoder := json.NewDecoder(data)
+
+ var objmap []*ClusterInfo
+ if err := decoder.Decode(&objmap); err != nil {
+ return make([]*ClusterInfo, 0)
+ } else {
+ return objmap
+ }
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/cluster_message.go b/vendor/github.com/mattermost/mattermost-server/model/cluster_message.go
new file mode 100644
index 00000000..f060c4ac
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/cluster_message.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+const (
+ CLUSTER_EVENT_PUBLISH = "publish"
+ CLUSTER_EVENT_UPDATE_STATUS = "update_status"
+ CLUSTER_EVENT_INVALIDATE_ALL_CACHES = "inv_all_caches"
+ CLUSTER_EVENT_INVALIDATE_CACHE_FOR_REACTIONS = "inv_reactions"
+ CLUSTER_EVENT_INVALIDATE_CACHE_FOR_WEBHOOK = "inv_webhook"
+ CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_POSTS = "inv_channel_posts"
+ CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_MEMBERS_NOTIFY_PROPS = "inv_channel_members_notify_props"
+ CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_MEMBERS = "inv_channel_members"
+ CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_BY_NAME = "inv_channel_name"
+ CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL = "inv_channel"
+ CLUSTER_EVENT_INVALIDATE_CACHE_FOR_USER = "inv_user"
+ CLUSTER_EVENT_CLEAR_SESSION_CACHE_FOR_USER = "clear_session_user"
+
+ CLUSTER_SEND_BEST_EFFORT = "best_effort"
+ CLUSTER_SEND_RELIABLE = "reliable"
+)
+
+type ClusterMessage struct {
+ Event string `json:"event"`
+ SendType string `json:"-"`
+ WaitForAllToSend bool `json:"-"`
+ Data string `json:"data,omitempty"`
+ Props map[string]string `json:"props,omitempty"`
+}
+
+func (o *ClusterMessage) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func ClusterMessageFromJson(data io.Reader) *ClusterMessage {
+ var o *ClusterMessage
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/cluster_stats.go b/vendor/github.com/mattermost/mattermost-server/model/cluster_stats.go
new file mode 100644
index 00000000..064f7b81
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/cluster_stats.go
@@ -0,0 +1,27 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type ClusterStats struct {
+ Id string `json:"id"`
+ TotalWebsocketConnections int `json:"total_websocket_connections"`
+ TotalReadDbConnections int `json:"total_read_db_connections"`
+ TotalMasterDbConnections int `json:"total_master_db_connections"`
+}
+
+func (me *ClusterStats) ToJson() string {
+ b, _ := json.Marshal(me)
+ return string(b)
+}
+
+func ClusterStatsFromJson(data io.Reader) *ClusterStats {
+ var me *ClusterStats
+ json.NewDecoder(data).Decode(&me)
+ return me
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/command.go b/vendor/github.com/mattermost/mattermost-server/model/command.go
new file mode 100644
index 00000000..b23e5020
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/command.go
@@ -0,0 +1,139 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "strings"
+)
+
+const (
+ COMMAND_METHOD_POST = "P"
+ COMMAND_METHOD_GET = "G"
+ MIN_TRIGGER_LENGTH = 1
+ MAX_TRIGGER_LENGTH = 128
+)
+
+type Command struct {
+ Id string `json:"id"`
+ Token string `json:"token"`
+ CreateAt int64 `json:"create_at"`
+ UpdateAt int64 `json:"update_at"`
+ DeleteAt int64 `json:"delete_at"`
+ CreatorId string `json:"creator_id"`
+ TeamId string `json:"team_id"`
+ Trigger string `json:"trigger"`
+ Method string `json:"method"`
+ Username string `json:"username"`
+ IconURL string `json:"icon_url"`
+ AutoComplete bool `json:"auto_complete"`
+ AutoCompleteDesc string `json:"auto_complete_desc"`
+ AutoCompleteHint string `json:"auto_complete_hint"`
+ DisplayName string `json:"display_name"`
+ Description string `json:"description"`
+ URL string `json:"url"`
+}
+
+func (o *Command) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func CommandFromJson(data io.Reader) *Command {
+ var o *Command
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func CommandListToJson(l []*Command) string {
+ b, _ := json.Marshal(l)
+ return string(b)
+}
+
+func CommandListFromJson(data io.Reader) []*Command {
+ var o []*Command
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func (o *Command) IsValid() *AppError {
+
+ if len(o.Id) != 26 {
+ return NewAppError("Command.IsValid", "model.command.is_valid.id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.Token) != 26 {
+ return NewAppError("Command.IsValid", "model.command.is_valid.token.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if o.CreateAt == 0 {
+ return NewAppError("Command.IsValid", "model.command.is_valid.create_at.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if o.UpdateAt == 0 {
+ return NewAppError("Command.IsValid", "model.command.is_valid.update_at.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.CreatorId) != 26 {
+ return NewAppError("Command.IsValid", "model.command.is_valid.user_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.TeamId) != 26 {
+ return NewAppError("Command.IsValid", "model.command.is_valid.team_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.Trigger) < MIN_TRIGGER_LENGTH || len(o.Trigger) > MAX_TRIGGER_LENGTH || strings.Index(o.Trigger, "/") == 0 || strings.Contains(o.Trigger, " ") {
+ return NewAppError("Command.IsValid", "model.command.is_valid.trigger.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.URL) == 0 || len(o.URL) > 1024 {
+ return NewAppError("Command.IsValid", "model.command.is_valid.url.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if !IsValidHttpUrl(o.URL) {
+ return NewAppError("Command.IsValid", "model.command.is_valid.url_http.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if !(o.Method == COMMAND_METHOD_GET || o.Method == COMMAND_METHOD_POST) {
+ return NewAppError("Command.IsValid", "model.command.is_valid.method.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.DisplayName) > 64 {
+ return NewAppError("Command.IsValid", "model.command.is_valid.display_name.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.Description) > 128 {
+ return NewAppError("Command.IsValid", "model.command.is_valid.description.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (o *Command) PreSave() {
+ if o.Id == "" {
+ o.Id = NewId()
+ }
+
+ if o.Token == "" {
+ o.Token = NewId()
+ }
+
+ o.CreateAt = GetMillis()
+ o.UpdateAt = o.CreateAt
+}
+
+func (o *Command) PreUpdate() {
+ o.UpdateAt = GetMillis()
+}
+
+func (o *Command) Sanitize() {
+ o.Token = ""
+ o.CreatorId = ""
+ o.Method = ""
+ o.URL = ""
+ o.Username = ""
+ o.IconURL = ""
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/command_args.go b/vendor/github.com/mattermost/mattermost-server/model/command_args.go
new file mode 100644
index 00000000..4a635a1a
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/command_args.go
@@ -0,0 +1,34 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+
+ goi18n "github.com/nicksnyder/go-i18n/i18n"
+)
+
+type CommandArgs struct {
+ UserId string `json:"user_id"`
+ ChannelId string `json:"channel_id"`
+ TeamId string `json:"team_id"`
+ RootId string `json:"root_id"`
+ ParentId string `json:"parent_id"`
+ Command string `json:"command"`
+ SiteURL string `json:"-"`
+ T goi18n.TranslateFunc `json:"-"`
+ Session Session `json:"-"`
+}
+
+func (o *CommandArgs) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func CommandArgsFromJson(data io.Reader) *CommandArgs {
+ var o *CommandArgs
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/command_response.go b/vendor/github.com/mattermost/mattermost-server/model/command_response.go
new file mode 100644
index 00000000..cac7e845
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/command_response.go
@@ -0,0 +1,61 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "strings"
+)
+
+const (
+ COMMAND_RESPONSE_TYPE_IN_CHANNEL = "in_channel"
+ COMMAND_RESPONSE_TYPE_EPHEMERAL = "ephemeral"
+)
+
+type CommandResponse struct {
+ ResponseType string `json:"response_type"`
+ Text string `json:"text"`
+ Username string `json:"username"`
+ IconURL string `json:"icon_url"`
+ Type string `json:"type"`
+ Props StringInterface `json:"props"`
+ GotoLocation string `json:"goto_location"`
+ Attachments []*SlackAttachment `json:"attachments"`
+}
+
+func (o *CommandResponse) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func CommandResponseFromHTTPBody(contentType string, body io.Reader) *CommandResponse {
+ if strings.TrimSpace(strings.Split(contentType, ";")[0]) == "application/json" {
+ return CommandResponseFromJson(body)
+ }
+ if b, err := ioutil.ReadAll(body); err == nil {
+ return CommandResponseFromPlainText(string(b))
+ }
+ return nil
+}
+
+func CommandResponseFromPlainText(text string) *CommandResponse {
+ return &CommandResponse{
+ Text: text,
+ }
+}
+
+func CommandResponseFromJson(data io.Reader) *CommandResponse {
+ decoder := json.NewDecoder(data)
+ var o CommandResponse
+
+ if err := decoder.Decode(&o); err != nil {
+ return nil
+ }
+
+ o.Attachments = StringifySlackFieldValue(o.Attachments)
+
+ return &o
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/command_webhook.go b/vendor/github.com/mattermost/mattermost-server/model/command_webhook.go
new file mode 100644
index 00000000..0b00e00b
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/command_webhook.go
@@ -0,0 +1,65 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "net/http"
+)
+
+type CommandWebhook struct {
+ Id string
+ CreateAt int64
+ CommandId string
+ UserId string
+ ChannelId string
+ RootId string
+ ParentId string
+ UseCount int
+}
+
+const (
+ COMMAND_WEBHOOK_LIFETIME = 1000 * 60 * 30
+)
+
+func (o *CommandWebhook) PreSave() {
+ if o.Id == "" {
+ o.Id = NewId()
+ }
+
+ if o.CreateAt == 0 {
+ o.CreateAt = GetMillis()
+ }
+}
+
+func (o *CommandWebhook) IsValid() *AppError {
+ if len(o.Id) != 26 {
+ return NewAppError("CommandWebhook.IsValid", "model.command_hook.id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if o.CreateAt == 0 {
+ return NewAppError("CommandWebhook.IsValid", "model.command_hook.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if len(o.CommandId) != 26 {
+ return NewAppError("CommandWebhook.IsValid", "model.command_hook.command_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.UserId) != 26 {
+ return NewAppError("CommandWebhook.IsValid", "model.command_hook.user_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.ChannelId) != 26 {
+ return NewAppError("CommandWebhook.IsValid", "model.command_hook.channel_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.RootId) != 0 && len(o.RootId) != 26 {
+ return NewAppError("CommandWebhook.IsValid", "model.command_hook.root_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.ParentId) != 0 && len(o.ParentId) != 26 {
+ return NewAppError("CommandWebhook.IsValid", "model.command_hook.parent_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/compliance.go b/vendor/github.com/mattermost/mattermost-server/model/compliance.go
new file mode 100644
index 00000000..5546b783
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/compliance.go
@@ -0,0 +1,119 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "strings"
+)
+
+const (
+ COMPLIANCE_STATUS_CREATED = "created"
+ COMPLIANCE_STATUS_RUNNING = "running"
+ COMPLIANCE_STATUS_FINISHED = "finished"
+ COMPLIANCE_STATUS_FAILED = "failed"
+ COMPLIANCE_STATUS_REMOVED = "removed"
+
+ COMPLIANCE_TYPE_DAILY = "daily"
+ COMPLIANCE_TYPE_ADHOC = "adhoc"
+)
+
+type Compliance struct {
+ Id string `json:"id"`
+ CreateAt int64 `json:"create_at"`
+ UserId string `json:"user_id"`
+ Status string `json:"status"`
+ Count int `json:"count"`
+ Desc string `json:"desc"`
+ Type string `json:"type"`
+ StartAt int64 `json:"start_at"`
+ EndAt int64 `json:"end_at"`
+ Keywords string `json:"keywords"`
+ Emails string `json:"emails"`
+}
+
+type Compliances []Compliance
+
+func (o *Compliance) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func (me *Compliance) PreSave() {
+ if me.Id == "" {
+ me.Id = NewId()
+ }
+
+ if me.Status == "" {
+ me.Status = COMPLIANCE_STATUS_CREATED
+ }
+
+ me.Count = 0
+ me.Emails = NormalizeEmail(me.Emails)
+ me.Keywords = strings.ToLower(me.Keywords)
+
+ me.CreateAt = GetMillis()
+}
+
+func (me *Compliance) JobName() string {
+ jobName := me.Type
+ if me.Type == COMPLIANCE_TYPE_DAILY {
+ jobName += "-" + me.Desc
+ }
+
+ jobName += "-" + me.Id
+
+ return jobName
+}
+
+func (me *Compliance) IsValid() *AppError {
+
+ if len(me.Id) != 26 {
+ return NewAppError("Compliance.IsValid", "model.compliance.is_valid.id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if me.CreateAt == 0 {
+ return NewAppError("Compliance.IsValid", "model.compliance.is_valid.create_at.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(me.Desc) > 512 || len(me.Desc) == 0 {
+ return NewAppError("Compliance.IsValid", "model.compliance.is_valid.desc.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if me.StartAt == 0 {
+ return NewAppError("Compliance.IsValid", "model.compliance.is_valid.start_at.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if me.EndAt == 0 {
+ return NewAppError("Compliance.IsValid", "model.compliance.is_valid.end_at.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if me.EndAt <= me.StartAt {
+ return NewAppError("Compliance.IsValid", "model.compliance.is_valid.start_end_at.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func ComplianceFromJson(data io.Reader) *Compliance {
+ var o *Compliance
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func (o Compliances) ToJson() string {
+ if b, err := json.Marshal(o); err != nil {
+ return "[]"
+ } else {
+ return string(b)
+ }
+}
+
+func CompliancesFromJson(data io.Reader) Compliances {
+ var o Compliances
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/compliance_post.go b/vendor/github.com/mattermost/mattermost-server/model/compliance_post.go
new file mode 100644
index 00000000..3751c586
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/compliance_post.go
@@ -0,0 +1,114 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "regexp"
+ "time"
+)
+
+type CompliancePost struct {
+
+ // From Team
+ TeamName string
+ TeamDisplayName string
+
+ // From Channel
+ ChannelName string
+ ChannelDisplayName string
+
+ // From User
+ UserUsername string
+ UserEmail string
+ UserNickname string
+
+ // From Post
+ PostId string
+ PostCreateAt int64
+ PostUpdateAt int64
+ PostDeleteAt int64
+ PostRootId string
+ PostParentId string
+ PostOriginalId string
+ PostMessage string
+ PostType string
+ PostProps string
+ PostHashtags string
+ PostFileIds string
+}
+
+func CompliancePostHeader() []string {
+ return []string{
+ "TeamName",
+ "TeamDisplayName",
+
+ "ChannelName",
+ "ChannelDisplayName",
+
+ "UserUsername",
+ "UserEmail",
+ "UserNickname",
+
+ "PostId",
+ "PostCreateAt",
+ "PostUpdateAt",
+ "PostDeleteAt",
+ "PostRootId",
+ "PostParentId",
+ "PostOriginalId",
+ "PostMessage",
+ "PostType",
+ "PostProps",
+ "PostHashtags",
+ "PostFileIds",
+ }
+}
+
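+// cleanComplianceStrings prefixes a single quote to values starting with =, + or -
+// so that spreadsheet software does not interpret exported CSV cells as formulas.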
+func cleanComplianceStrings(in string) string {
+ if matched, _ := regexp.MatchString("^\\s*(=|\\+|\\-)", in); matched {
+ return "'" + in
+
+ } else {
+ return in
+ }
+}
+
+func (me *CompliancePost) Row() []string {
+
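+ // Timestamps are stored in milliseconds; convert them to nanoseconds for time.Unix.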
+ postDeleteAt := ""
+ if me.PostDeleteAt > 0 {
+ postDeleteAt = time.Unix(0, me.PostDeleteAt*int64(1000*1000)).Format(time.RFC3339)
+ }
+
+ postUpdateAt := ""
+ if me.PostUpdateAt != me.PostCreateAt {
+ postUpdateAt = time.Unix(0, me.PostUpdateAt*int64(1000*1000)).Format(time.RFC3339)
+ }
+
+ return []string{
+ cleanComplianceStrings(me.TeamName),
+ cleanComplianceStrings(me.TeamDisplayName),
+
+ cleanComplianceStrings(me.ChannelName),
+ cleanComplianceStrings(me.ChannelDisplayName),
+
+ cleanComplianceStrings(me.UserUsername),
+ cleanComplianceStrings(me.UserEmail),
+ cleanComplianceStrings(me.UserNickname),
+
+ me.PostId,
+ time.Unix(0, me.PostCreateAt*int64(1000*1000)).Format(time.RFC3339),
+ postUpdateAt,
+ postDeleteAt,
+
+ me.PostRootId,
+ me.PostParentId,
+ me.PostOriginalId,
+ cleanComplianceStrings(me.PostMessage),
+ me.PostType,
+ me.PostProps,
+ me.PostHashtags,
+ me.PostFileIds,
+ }
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/config.go b/vendor/github.com/mattermost/mattermost-server/model/config.go
new file mode 100644
index 00000000..9010eaea
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/config.go
@@ -0,0 +1,2238 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+const (
+ CONN_SECURITY_NONE = ""
+ CONN_SECURITY_PLAIN = "PLAIN"
+ CONN_SECURITY_TLS = "TLS"
+ CONN_SECURITY_STARTTLS = "STARTTLS"
+
+ IMAGE_DRIVER_LOCAL = "local"
+ IMAGE_DRIVER_S3 = "amazons3"
+
+ DATABASE_DRIVER_MYSQL = "mysql"
+ DATABASE_DRIVER_POSTGRES = "postgres"
+
+ MINIO_ACCESS_KEY = "minioaccesskey"
+ MINIO_SECRET_KEY = "miniosecretkey"
+ MINIO_BUCKET = "mattermost-test"
+
+ PASSWORD_MAXIMUM_LENGTH = 64
+ PASSWORD_MINIMUM_LENGTH = 5
+
+ SERVICE_GITLAB = "gitlab"
+ SERVICE_GOOGLE = "google"
+ SERVICE_OFFICE365 = "office365"
+
+ WEBSERVER_MODE_REGULAR = "regular"
+ WEBSERVER_MODE_GZIP = "gzip"
+ WEBSERVER_MODE_DISABLED = "disabled"
+
+ GENERIC_NO_CHANNEL_NOTIFICATION = "generic_no_channel"
+ GENERIC_NOTIFICATION = "generic"
+ FULL_NOTIFICATION = "full"
+
+ DIRECT_MESSAGE_ANY = "any"
+ DIRECT_MESSAGE_TEAM = "team"
+
+ SHOW_USERNAME = "username"
+ SHOW_NICKNAME_FULLNAME = "nickname_full_name"
+ SHOW_FULLNAME = "full_name"
+
+ PERMISSIONS_ALL = "all"
+ PERMISSIONS_CHANNEL_ADMIN = "channel_admin"
+ PERMISSIONS_TEAM_ADMIN = "team_admin"
+ PERMISSIONS_SYSTEM_ADMIN = "system_admin"
+
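+ // Placeholder used to mask secret values, e.g. when the config is sanitized.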
+ FAKE_SETTING = "********************************"
+
+ RESTRICT_EMOJI_CREATION_ALL = "all"
+ RESTRICT_EMOJI_CREATION_ADMIN = "admin"
+ RESTRICT_EMOJI_CREATION_SYSTEM_ADMIN = "system_admin"
+
+ PERMISSIONS_DELETE_POST_ALL = "all"
+ PERMISSIONS_DELETE_POST_TEAM_ADMIN = "team_admin"
+ PERMISSIONS_DELETE_POST_SYSTEM_ADMIN = "system_admin"
+
+ ALLOW_EDIT_POST_ALWAYS = "always"
+ ALLOW_EDIT_POST_NEVER = "never"
+ ALLOW_EDIT_POST_TIME_LIMIT = "time_limit"
+
+ GROUP_UNREAD_CHANNELS_DISABLED = "disabled"
+ GROUP_UNREAD_CHANNELS_DEFAULT_ON = "default_on"
+ GROUP_UNREAD_CHANNELS_DEFAULT_OFF = "default_off"
+
+ EMAIL_BATCHING_BUFFER_SIZE = 256
+ EMAIL_BATCHING_INTERVAL = 30
+
+ EMAIL_NOTIFICATION_CONTENTS_FULL = "full"
+ EMAIL_NOTIFICATION_CONTENTS_GENERIC = "generic"
+
+ SITENAME_MAX_LENGTH = 30
+
+ SERVICE_SETTINGS_DEFAULT_SITE_URL = ""
+ SERVICE_SETTINGS_DEFAULT_TLS_CERT_FILE = ""
+ SERVICE_SETTINGS_DEFAULT_TLS_KEY_FILE = ""
+ SERVICE_SETTINGS_DEFAULT_READ_TIMEOUT = 300
+ SERVICE_SETTINGS_DEFAULT_WRITE_TIMEOUT = 300
+ SERVICE_SETTINGS_DEFAULT_MAX_LOGIN_ATTEMPTS = 10
+ SERVICE_SETTINGS_DEFAULT_ALLOW_CORS_FROM = ""
+ SERVICE_SETTINGS_DEFAULT_LISTEN_AND_ADDRESS = ":8065"
+
+ TEAM_SETTINGS_DEFAULT_MAX_USERS_PER_TEAM = 50
+ TEAM_SETTINGS_DEFAULT_CUSTOM_BRAND_TEXT = ""
+ TEAM_SETTINGS_DEFAULT_CUSTOM_DESCRIPTION_TEXT = ""
+ TEAM_SETTINGS_DEFAULT_USER_STATUS_AWAY_TIMEOUT = 300
+
+ SQL_SETTINGS_DEFAULT_DATA_SOURCE = "mmuser:mostest@tcp(dockerhost:3306)/mattermost_test?charset=utf8mb4,utf8&readTimeout=30s&writeTimeout=30s"
+
+ EMAIL_SETTINGS_DEFAULT_FEEDBACK_ORGANIZATION = ""
+
+ SUPPORT_SETTINGS_DEFAULT_TERMS_OF_SERVICE_LINK = "https://about.mattermost.com/default-terms/"
+ SUPPORT_SETTINGS_DEFAULT_PRIVACY_POLICY_LINK = "https://about.mattermost.com/default-privacy-policy/"
+ SUPPORT_SETTINGS_DEFAULT_ABOUT_LINK = "https://about.mattermost.com/default-about/"
+ SUPPORT_SETTINGS_DEFAULT_HELP_LINK = "https://about.mattermost.com/default-help/"
+ SUPPORT_SETTINGS_DEFAULT_REPORT_A_PROBLEM_LINK = "https://about.mattermost.com/default-report-a-problem/"
+ SUPPORT_SETTINGS_DEFAULT_ADMINISTRATORS_GUIDE_LINK = "https://about.mattermost.com/administrators-guide/"
+ SUPPORT_SETTINGS_DEFAULT_TROUBLESHOOTING_FORUM_LINK = "https://about.mattermost.com/troubleshooting-forum/"
+ SUPPORT_SETTINGS_DEFAULT_COMMERCIAL_SUPPORT_LINK = "https://about.mattermost.com/commercial-support/"
+ SUPPORT_SETTINGS_DEFAULT_SUPPORT_EMAIL = "feedback@mattermost.com"
+
+ LDAP_SETTINGS_DEFAULT_FIRST_NAME_ATTRIBUTE = ""
+ LDAP_SETTINGS_DEFAULT_LAST_NAME_ATTRIBUTE = ""
+ LDAP_SETTINGS_DEFAULT_EMAIL_ATTRIBUTE = ""
+ LDAP_SETTINGS_DEFAULT_USERNAME_ATTRIBUTE = ""
+ LDAP_SETTINGS_DEFAULT_NICKNAME_ATTRIBUTE = ""
+ LDAP_SETTINGS_DEFAULT_ID_ATTRIBUTE = ""
+ LDAP_SETTINGS_DEFAULT_POSITION_ATTRIBUTE = ""
+ LDAP_SETTINGS_DEFAULT_LOGIN_FIELD_NAME = ""
+
+ SAML_SETTINGS_DEFAULT_FIRST_NAME_ATTRIBUTE = ""
+ SAML_SETTINGS_DEFAULT_LAST_NAME_ATTRIBUTE = ""
+ SAML_SETTINGS_DEFAULT_EMAIL_ATTRIBUTE = ""
+ SAML_SETTINGS_DEFAULT_USERNAME_ATTRIBUTE = ""
+ SAML_SETTINGS_DEFAULT_NICKNAME_ATTRIBUTE = ""
+ SAML_SETTINGS_DEFAULT_LOCALE_ATTRIBUTE = ""
+ SAML_SETTINGS_DEFAULT_POSITION_ATTRIBUTE = ""
+
+ NATIVEAPP_SETTINGS_DEFAULT_APP_DOWNLOAD_LINK = "https://about.mattermost.com/downloads/"
+ NATIVEAPP_SETTINGS_DEFAULT_ANDROID_APP_DOWNLOAD_LINK = "https://about.mattermost.com/mattermost-android-app/"
+ NATIVEAPP_SETTINGS_DEFAULT_IOS_APP_DOWNLOAD_LINK = "https://about.mattermost.com/mattermost-ios-app/"
+
+ WEBRTC_SETTINGS_DEFAULT_STUN_URI = ""
+ WEBRTC_SETTINGS_DEFAULT_TURN_URI = ""
+
+ ANALYTICS_SETTINGS_DEFAULT_MAX_USERS_FOR_STATISTICS = 2500
+
+ ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_COLOR = "#f2a93b"
+ ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_TEXT_COLOR = "#333333"
+
+ TEAM_SETTINGS_DEFAULT_TEAM_TEXT = "default"
+
+ ELASTICSEARCH_SETTINGS_DEFAULT_CONNECTION_URL = ""
+ ELASTICSEARCH_SETTINGS_DEFAULT_USERNAME = ""
+ ELASTICSEARCH_SETTINGS_DEFAULT_PASSWORD = ""
+ ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_REPLICAS = 1
+ ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_SHARDS = 1
+ ELASTICSEARCH_SETTINGS_DEFAULT_AGGREGATE_POSTS_AFTER_DAYS = 365
+ ELASTICSEARCH_SETTINGS_DEFAULT_POSTS_AGGREGATOR_JOB_START_TIME = "03:00"
+ ELASTICSEARCH_SETTINGS_DEFAULT_INDEX_PREFIX = ""
+ ELASTICSEARCH_SETTINGS_DEFAULT_LIVE_INDEXING_BATCH_SIZE = 1
+ ELASTICSEARCH_SETTINGS_DEFAULT_BULK_INDEXING_TIME_WINDOW_SECONDS = 3600
+ ELASTICSEARCH_SETTINGS_DEFAULT_REQUEST_TIMEOUT_SECONDS = 30
+
+ DATA_RETENTION_SETTINGS_DEFAULT_MESSAGE_RETENTION_DAYS = 365
+ DATA_RETENTION_SETTINGS_DEFAULT_FILE_RETENTION_DAYS = 365
+ DATA_RETENTION_SETTINGS_DEFAULT_DELETION_JOB_START_TIME = "02:00"
+
+ PLUGIN_SETTINGS_DEFAULT_DIRECTORY = "./plugins"
+ PLUGIN_SETTINGS_DEFAULT_CLIENT_DIRECTORY = "./client/plugins"
+
+ COMPLIANCE_EXPORT_TYPE_ACTIANCE = "actiance"
+ COMPLIANCE_EXPORT_TYPE_GLOBALRELAY = "globalrelay"
+)
+
+type ServiceSettings struct {
+ SiteURL *string
+ LicenseFileLocation *string
+ ListenAddress *string
+ ConnectionSecurity *string
+ TLSCertFile *string
+ TLSKeyFile *string
+ UseLetsEncrypt *bool
+ LetsEncryptCertificateCacheFile *string
+ Forward80To443 *bool
+ ReadTimeout *int
+ WriteTimeout *int
+ MaximumLoginAttempts *int
+ GoroutineHealthThreshold *int
+ GoogleDeveloperKey string
+ EnableOAuthServiceProvider bool
+ EnableIncomingWebhooks bool
+ EnableOutgoingWebhooks bool
+ EnableCommands *bool
+ EnableOnlyAdminIntegrations *bool
+ EnablePostUsernameOverride bool
+ EnablePostIconOverride bool
+ EnableAPIv3 *bool
+ EnableLinkPreviews *bool
+ EnableTesting bool
+ EnableDeveloper *bool
+ EnableSecurityFixAlert *bool
+ EnableInsecureOutgoingConnections *bool
+ AllowedUntrustedInternalConnections *string
+ EnableMultifactorAuthentication *bool
+ EnforceMultifactorAuthentication *bool
+ EnableUserAccessTokens *bool
+ AllowCorsFrom *string
+ SessionLengthWebInDays *int
+ SessionLengthMobileInDays *int
+ SessionLengthSSOInDays *int
+ SessionCacheInMinutes *int
+ SessionIdleTimeoutInMinutes *int
+ WebsocketSecurePort *int
+ WebsocketPort *int
+ WebserverMode *string
+ EnableCustomEmoji *bool
+ EnableEmojiPicker *bool
+ RestrictCustomEmojiCreation *string
+ RestrictPostDelete *string
+ AllowEditPost *string
+ PostEditTimeLimit *int
+ TimeBetweenUserTypingUpdatesMilliseconds *int64
+ EnablePostSearch *bool
+ EnableUserTypingMessages *bool
+ EnableChannelViewedMessages *bool
+ EnableUserStatuses *bool
+ ExperimentalEnableAuthenticationTransfer *bool
+ ClusterLogTimeoutMilliseconds *int
+ CloseUnusedDirectMessages *bool
+ EnablePreviewFeatures *bool
+ EnableTutorial *bool
+ ExperimentalEnableDefaultChannelLeaveJoinMessages *bool
+ ExperimentalGroupUnreadChannels *string
+ ImageProxyType *string
+ ImageProxyURL *string
+ ImageProxyOptions *string
+}
+
+func (s *ServiceSettings) SetDefaults() {
+ if s.SiteURL == nil {
+ s.SiteURL = NewString(SERVICE_SETTINGS_DEFAULT_SITE_URL)
+ }
+
+ if s.LicenseFileLocation == nil {
+ s.LicenseFileLocation = NewString("")
+ }
+
+ if s.ListenAddress == nil {
+ s.ListenAddress = NewString(SERVICE_SETTINGS_DEFAULT_LISTEN_AND_ADDRESS)
+ }
+
+ if s.EnableAPIv3 == nil {
+ s.EnableAPIv3 = NewBool(true)
+ }
+
+ if s.EnableLinkPreviews == nil {
+ s.EnableLinkPreviews = NewBool(false)
+ }
+
+ if s.EnableDeveloper == nil {
+ s.EnableDeveloper = NewBool(false)
+ }
+
+ if s.EnableSecurityFixAlert == nil {
+ s.EnableSecurityFixAlert = NewBool(true)
+ }
+
+ if s.EnableInsecureOutgoingConnections == nil {
+ s.EnableInsecureOutgoingConnections = NewBool(false)
+ }
+
+ if s.AllowedUntrustedInternalConnections == nil {
+ s.AllowedUntrustedInternalConnections = NewString("")
+ }
+
+ if s.EnableMultifactorAuthentication == nil {
+ s.EnableMultifactorAuthentication = NewBool(false)
+ }
+
+ if s.EnforceMultifactorAuthentication == nil {
+ s.EnforceMultifactorAuthentication = NewBool(false)
+ }
+
+ if s.EnableUserAccessTokens == nil {
+ s.EnableUserAccessTokens = NewBool(false)
+ }
+
+ if s.GoroutineHealthThreshold == nil {
+ s.GoroutineHealthThreshold = NewInt(-1)
+ }
+
+ if s.ConnectionSecurity == nil {
+ s.ConnectionSecurity = NewString("")
+ }
+
+ if s.TLSKeyFile == nil {
+ s.TLSKeyFile = NewString(SERVICE_SETTINGS_DEFAULT_TLS_KEY_FILE)
+ }
+
+ if s.TLSCertFile == nil {
+ s.TLSCertFile = NewString(SERVICE_SETTINGS_DEFAULT_TLS_CERT_FILE)
+ }
+
+ if s.UseLetsEncrypt == nil {
+ s.UseLetsEncrypt = NewBool(false)
+ }
+
+ if s.LetsEncryptCertificateCacheFile == nil {
+ s.LetsEncryptCertificateCacheFile = NewString("./config/letsencrypt.cache")
+ }
+
+ if s.ReadTimeout == nil {
+ s.ReadTimeout = NewInt(SERVICE_SETTINGS_DEFAULT_READ_TIMEOUT)
+ }
+
+ if s.WriteTimeout == nil {
+ s.WriteTimeout = NewInt(SERVICE_SETTINGS_DEFAULT_WRITE_TIMEOUT)
+ }
+
+ if s.MaximumLoginAttempts == nil {
+ s.MaximumLoginAttempts = NewInt(SERVICE_SETTINGS_DEFAULT_MAX_LOGIN_ATTEMPTS)
+ }
+
+ if s.Forward80To443 == nil {
+ s.Forward80To443 = NewBool(false)
+ }
+
+ if s.TimeBetweenUserTypingUpdatesMilliseconds == nil {
+ s.TimeBetweenUserTypingUpdatesMilliseconds = NewInt64(5000)
+ }
+
+ if s.EnablePostSearch == nil {
+ s.EnablePostSearch = NewBool(true)
+ }
+
+ if s.EnableUserTypingMessages == nil {
+ s.EnableUserTypingMessages = NewBool(true)
+ }
+
+ if s.EnableChannelViewedMessages == nil {
+ s.EnableChannelViewedMessages = NewBool(true)
+ }
+
+ if s.EnableUserStatuses == nil {
+ s.EnableUserStatuses = NewBool(true)
+ }
+
+ if s.ClusterLogTimeoutMilliseconds == nil {
+ s.ClusterLogTimeoutMilliseconds = NewInt(2000)
+ }
+
+ if s.CloseUnusedDirectMessages == nil {
+ s.CloseUnusedDirectMessages = NewBool(false)
+ }
+
+ if s.EnableTutorial == nil {
+ s.EnableTutorial = NewBool(true)
+ }
+
+ if s.SessionLengthWebInDays == nil {
+ s.SessionLengthWebInDays = NewInt(30)
+ }
+
+ if s.SessionLengthMobileInDays == nil {
+ s.SessionLengthMobileInDays = NewInt(30)
+ }
+
+ if s.SessionLengthSSOInDays == nil {
+ s.SessionLengthSSOInDays = NewInt(30)
+ }
+
+ if s.SessionCacheInMinutes == nil {
+ s.SessionCacheInMinutes = NewInt(10)
+ }
+
+ if s.SessionIdleTimeoutInMinutes == nil {
+ s.SessionIdleTimeoutInMinutes = NewInt(0)
+ }
+
+ if s.EnableCommands == nil {
+ s.EnableCommands = NewBool(false)
+ }
+
+ if s.EnableOnlyAdminIntegrations == nil {
+ s.EnableOnlyAdminIntegrations = NewBool(true)
+ }
+
+ if s.WebsocketPort == nil {
+ s.WebsocketPort = NewInt(80)
+ }
+
+ if s.WebsocketSecurePort == nil {
+ s.WebsocketSecurePort = NewInt(443)
+ }
+
+ if s.AllowCorsFrom == nil {
+ s.AllowCorsFrom = NewString(SERVICE_SETTINGS_DEFAULT_ALLOW_CORS_FROM)
+ }
+
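+ // Upgrade a previously saved "regular" mode to "gzip".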
+ if s.WebserverMode == nil {
+ s.WebserverMode = NewString("gzip")
+ } else if *s.WebserverMode == "regular" {
+ *s.WebserverMode = "gzip"
+ }
+
+ if s.EnableCustomEmoji == nil {
+ s.EnableCustomEmoji = NewBool(false)
+ }
+
+ if s.EnableEmojiPicker == nil {
+ s.EnableEmojiPicker = NewBool(true)
+ }
+
+ if s.RestrictCustomEmojiCreation == nil {
+ s.RestrictCustomEmojiCreation = NewString(RESTRICT_EMOJI_CREATION_ALL)
+ }
+
+ if s.RestrictPostDelete == nil {
+ s.RestrictPostDelete = NewString(PERMISSIONS_DELETE_POST_ALL)
+ }
+
+ if s.AllowEditPost == nil {
+ s.AllowEditPost = NewString(ALLOW_EDIT_POST_ALWAYS)
+ }
+
+ if s.ExperimentalEnableAuthenticationTransfer == nil {
+ s.ExperimentalEnableAuthenticationTransfer = NewBool(true)
+ }
+
+ if s.PostEditTimeLimit == nil {
+ s.PostEditTimeLimit = NewInt(300)
+ }
+
+ if s.EnablePreviewFeatures == nil {
+ s.EnablePreviewFeatures = NewBool(true)
+ }
+
+ if s.ExperimentalEnableDefaultChannelLeaveJoinMessages == nil {
+ s.ExperimentalEnableDefaultChannelLeaveJoinMessages = NewBool(true)
+ }
+
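+ // Migrate the legacy "0"/"1" values to the named settings.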
+ if s.ExperimentalGroupUnreadChannels == nil {
+ s.ExperimentalGroupUnreadChannels = NewString(GROUP_UNREAD_CHANNELS_DISABLED)
+ } else if *s.ExperimentalGroupUnreadChannels == "0" {
+ s.ExperimentalGroupUnreadChannels = NewString(GROUP_UNREAD_CHANNELS_DISABLED)
+ } else if *s.ExperimentalGroupUnreadChannels == "1" {
+ s.ExperimentalGroupUnreadChannels = NewString(GROUP_UNREAD_CHANNELS_DEFAULT_ON)
+ }
+
+ if s.ImageProxyType == nil {
+ s.ImageProxyType = NewString("")
+ }
+
+ if s.ImageProxyURL == nil {
+ s.ImageProxyURL = NewString("")
+ }
+
+ if s.ImageProxyOptions == nil {
+ s.ImageProxyOptions = NewString("")
+ }
+}
+
+type ClusterSettings struct {
+ Enable *bool
+ ClusterName *string
+ OverrideHostname *string
+ UseIpAddress *bool
+ UseExperimentalGossip *bool
+ ReadOnlyConfig *bool
+ GossipPort *int
+ StreamingPort *int
+}
+
+func (s *ClusterSettings) SetDefaults() {
+ if s.Enable == nil {
+ s.Enable = NewBool(false)
+ }
+
+ if s.ClusterName == nil {
+ s.ClusterName = NewString("")
+ }
+
+ if s.OverrideHostname == nil {
+ s.OverrideHostname = NewString("")
+ }
+
+ if s.UseIpAddress == nil {
+ s.UseIpAddress = NewBool(true)
+ }
+
+ if s.UseExperimentalGossip == nil {
+ s.UseExperimentalGossip = NewBool(false)
+ }
+
+ if s.ReadOnlyConfig == nil {
+ s.ReadOnlyConfig = NewBool(true)
+ }
+
+ if s.GossipPort == nil {
+ s.GossipPort = NewInt(8074)
+ }
+
+ if s.StreamingPort == nil {
+ s.StreamingPort = NewInt(8075)
+ }
+}
+
+type MetricsSettings struct {
+ Enable *bool
+ BlockProfileRate *int
+ ListenAddress *string
+}
+
+func (s *MetricsSettings) SetDefaults() {
+ if s.ListenAddress == nil {
+ s.ListenAddress = NewString(":8067")
+ }
+
+ if s.Enable == nil {
+ s.Enable = NewBool(false)
+ }
+
+ if s.BlockProfileRate == nil {
+ s.BlockProfileRate = NewInt(0)
+ }
+}
+
+type AnalyticsSettings struct {
+ MaxUsersForStatistics *int
+}
+
+func (s *AnalyticsSettings) SetDefaults() {
+ if s.MaxUsersForStatistics == nil {
+ s.MaxUsersForStatistics = NewInt(ANALYTICS_SETTINGS_DEFAULT_MAX_USERS_FOR_STATISTICS)
+ }
+}
+
+type SSOSettings struct {
+ Enable bool
+ Secret string
+ Id string
+ Scope string
+ AuthEndpoint string
+ TokenEndpoint string
+ UserApiEndpoint string
+}
+
+type SqlSettings struct {
+ DriverName *string
+ DataSource *string
+ DataSourceReplicas []string
+ DataSourceSearchReplicas []string
+ MaxIdleConns *int
+ MaxOpenConns *int
+ Trace bool
+ AtRestEncryptKey string
+ QueryTimeout *int
+}
+
+func (s *SqlSettings) SetDefaults() {
+ if s.DriverName == nil {
+ s.DriverName = NewString(DATABASE_DRIVER_MYSQL)
+ }
+
+ if s.DataSource == nil {
+ s.DataSource = NewString(SQL_SETTINGS_DEFAULT_DATA_SOURCE)
+ }
+
+ if len(s.AtRestEncryptKey) == 0 {
+ s.AtRestEncryptKey = NewRandomString(32)
+ }
+
+ if s.MaxIdleConns == nil {
+ s.MaxIdleConns = NewInt(20)
+ }
+
+ if s.MaxOpenConns == nil {
+ s.MaxOpenConns = NewInt(300)
+ }
+
+ if s.QueryTimeout == nil {
+ s.QueryTimeout = NewInt(30)
+ }
+}
+
+type LogSettings struct {
+ EnableConsole bool
+ ConsoleLevel string
+ EnableFile bool
+ FileLevel string
+ FileFormat string
+ FileLocation string
+ EnableWebhookDebugging bool
+ EnableDiagnostics *bool
+}
+
+func (s *LogSettings) SetDefaults() {
+ if s.EnableDiagnostics == nil {
+ s.EnableDiagnostics = NewBool(true)
+ }
+}
+
+type PasswordSettings struct {
+ MinimumLength *int
+ Lowercase *bool
+ Number *bool
+ Uppercase *bool
+ Symbol *bool
+}
+
+func (s *PasswordSettings) SetDefaults() {
+ if s.MinimumLength == nil {
+ s.MinimumLength = NewInt(PASSWORD_MINIMUM_LENGTH)
+ }
+
+ if s.Lowercase == nil {
+ s.Lowercase = NewBool(false)
+ }
+
+ if s.Number == nil {
+ s.Number = NewBool(false)
+ }
+
+ if s.Uppercase == nil {
+ s.Uppercase = NewBool(false)
+ }
+
+ if s.Symbol == nil {
+ s.Symbol = NewBool(false)
+ }
+}
+
+type FileSettings struct {
+ EnableFileAttachments *bool
+ EnableMobileUpload *bool
+ EnableMobileDownload *bool
+ MaxFileSize *int64
+ DriverName *string
+ Directory string
+ EnablePublicLink bool
+ PublicLinkSalt *string
+ InitialFont string
+ AmazonS3AccessKeyId string
+ AmazonS3SecretAccessKey string
+ AmazonS3Bucket string
+ AmazonS3Region string
+ AmazonS3Endpoint string
+ AmazonS3SSL *bool
+ AmazonS3SignV2 *bool
+ AmazonS3SSE *bool
+ AmazonS3Trace *bool
+}
+
+func (s *FileSettings) SetDefaults() {
+ if s.DriverName == nil {
+ s.DriverName = NewString(IMAGE_DRIVER_LOCAL)
+ }
+
+ if s.AmazonS3Endpoint == "" {
+ // Defaults to "s3.amazonaws.com"
+ s.AmazonS3Endpoint = "s3.amazonaws.com"
+ }
+
+ if s.AmazonS3SSL == nil {
+ s.AmazonS3SSL = NewBool(true) // Secure by default.
+ }
+
+ if s.AmazonS3SignV2 == nil {
+ s.AmazonS3SignV2 = new(bool)
+ // Signature v2 is not enabled by default.
+ }
+
+ if s.AmazonS3SSE == nil {
+ s.AmazonS3SSE = NewBool(false) // Not Encrypted by default.
+ }
+
+ if s.AmazonS3Trace == nil {
+ s.AmazonS3Trace = NewBool(false)
+ }
+
+ if s.EnableFileAttachments == nil {
+ s.EnableFileAttachments = NewBool(true)
+ }
+
+ if s.EnableMobileUpload == nil {
+ s.EnableMobileUpload = NewBool(true)
+ }
+
+ if s.EnableMobileDownload == nil {
+ s.EnableMobileDownload = NewBool(true)
+ }
+
+ if s.MaxFileSize == nil {
+ s.MaxFileSize = NewInt64(52428800) // 50 MB
+ }
+
+ if s.PublicLinkSalt == nil || len(*s.PublicLinkSalt) == 0 {
+ s.PublicLinkSalt = NewString(NewRandomString(32))
+ }
+
+ if s.InitialFont == "" {
+ // Defaults to "luximbi.ttf"
+ s.InitialFont = "luximbi.ttf"
+ }
+
+ if s.Directory == "" {
+ s.Directory = "./data/"
+ }
+}
+
+type EmailSettings struct {
+ EnableSignUpWithEmail bool
+ EnableSignInWithEmail *bool
+ EnableSignInWithUsername *bool
+ SendEmailNotifications bool
+ UseChannelInEmailNotifications *bool
+ RequireEmailVerification bool
+ FeedbackName string
+ FeedbackEmail string
+ FeedbackOrganization *string
+ EnableSMTPAuth *bool
+ SMTPUsername string
+ SMTPPassword string
+ SMTPServer string
+ SMTPPort string
+ ConnectionSecurity string
+ InviteSalt string
+ SendPushNotifications *bool
+ PushNotificationServer *string
+ PushNotificationContents *string
+ EnableEmailBatching *bool
+ EmailBatchingBufferSize *int
+ EmailBatchingInterval *int
+ SkipServerCertificateVerification *bool
+ EmailNotificationContentsType *string
+ LoginButtonColor *string
+ LoginButtonBorderColor *string
+ LoginButtonTextColor *string
+}
+
+func (s *EmailSettings) SetDefaults() {
+ if len(s.InviteSalt) == 0 {
+ s.InviteSalt = NewRandomString(32)
+ }
+
+ if s.EnableSignInWithEmail == nil {
+ s.EnableSignInWithEmail = NewBool(s.EnableSignUpWithEmail)
+ }
+
+ if s.EnableSignInWithUsername == nil {
+ s.EnableSignInWithUsername = NewBool(false)
+ }
+
+ if s.UseChannelInEmailNotifications == nil {
+ s.UseChannelInEmailNotifications = NewBool(false)
+ }
+
+ if s.SendPushNotifications == nil {
+ s.SendPushNotifications = NewBool(false)
+ }
+
+ if s.PushNotificationServer == nil {
+ s.PushNotificationServer = NewString("")
+ }
+
+ if s.PushNotificationContents == nil {
+ s.PushNotificationContents = NewString(GENERIC_NOTIFICATION)
+ }
+
+ if s.FeedbackOrganization == nil {
+ s.FeedbackOrganization = NewString(EMAIL_SETTINGS_DEFAULT_FEEDBACK_ORGANIZATION)
+ }
+
+ if s.EnableEmailBatching == nil {
+ s.EnableEmailBatching = NewBool(false)
+ }
+
+ if s.EmailBatchingBufferSize == nil {
+ s.EmailBatchingBufferSize = NewInt(EMAIL_BATCHING_BUFFER_SIZE)
+ }
+
+ if s.EmailBatchingInterval == nil {
+ s.EmailBatchingInterval = NewInt(EMAIL_BATCHING_INTERVAL)
+ }
+
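+ // SMTP auth defaults to enabled unless connection security is set to none.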
+ if s.EnableSMTPAuth == nil {
+ s.EnableSMTPAuth = new(bool)
+ if s.ConnectionSecurity == CONN_SECURITY_NONE {
+ *s.EnableSMTPAuth = false
+ } else {
+ *s.EnableSMTPAuth = true
+ }
+ }
+
+ if s.ConnectionSecurity == CONN_SECURITY_PLAIN {
+ s.ConnectionSecurity = CONN_SECURITY_NONE
+ }
+
+ if s.SkipServerCertificateVerification == nil {
+ s.SkipServerCertificateVerification = NewBool(false)
+ }
+
+ if s.EmailNotificationContentsType == nil {
+ s.EmailNotificationContentsType = NewString(EMAIL_NOTIFICATION_CONTENTS_FULL)
+ }
+
+ if s.LoginButtonColor == nil {
+ s.LoginButtonColor = NewString("#0000")
+ }
+
+ if s.LoginButtonBorderColor == nil {
+ s.LoginButtonBorderColor = NewString("#2389D7")
+ }
+
+ if s.LoginButtonTextColor == nil {
+ s.LoginButtonTextColor = NewString("#2389D7")
+ }
+}
+
+type RateLimitSettings struct {
+ Enable *bool
+ PerSec *int
+ MaxBurst *int
+ MemoryStoreSize *int
+ VaryByRemoteAddr *bool
+ VaryByUser *bool
+ VaryByHeader string
+}
+
+func (s *RateLimitSettings) SetDefaults() {
+ if s.Enable == nil {
+ s.Enable = NewBool(false)
+ }
+
+ if s.PerSec == nil {
+ s.PerSec = NewInt(10)
+ }
+
+ if s.MaxBurst == nil {
+ s.MaxBurst = NewInt(100)
+ }
+
+ if s.MemoryStoreSize == nil {
+ s.MemoryStoreSize = NewInt(10000)
+ }
+
+ if s.VaryByRemoteAddr == nil {
+ s.VaryByRemoteAddr = NewBool(true)
+ }
+
+ if s.VaryByUser == nil {
+ s.VaryByUser = NewBool(false)
+ }
+}
+
+type PrivacySettings struct {
+ ShowEmailAddress bool
+ ShowFullName bool
+}
+
+type SupportSettings struct {
+ TermsOfServiceLink *string
+ PrivacyPolicyLink *string
+ AboutLink *string
+ HelpLink *string
+ ReportAProblemLink *string
+ SupportEmail *string
+}
+
+func (s *SupportSettings) SetDefaults() {
+ if !IsSafeLink(s.TermsOfServiceLink) {
+ *s.TermsOfServiceLink = SUPPORT_SETTINGS_DEFAULT_TERMS_OF_SERVICE_LINK
+ }
+
+ if s.TermsOfServiceLink == nil {
+ s.TermsOfServiceLink = NewString(SUPPORT_SETTINGS_DEFAULT_TERMS_OF_SERVICE_LINK)
+ }
+
+ if !IsSafeLink(s.PrivacyPolicyLink) {
+ *s.PrivacyPolicyLink = ""
+ }
+
+ if s.PrivacyPolicyLink == nil {
+ s.PrivacyPolicyLink = NewString(SUPPORT_SETTINGS_DEFAULT_PRIVACY_POLICY_LINK)
+ }
+
+ if !IsSafeLink(s.AboutLink) {
+ *s.AboutLink = ""
+ }
+
+ if s.AboutLink == nil {
+ s.AboutLink = NewString(SUPPORT_SETTINGS_DEFAULT_ABOUT_LINK)
+ }
+
+ if !IsSafeLink(s.HelpLink) {
+ *s.HelpLink = ""
+ }
+
+ if s.HelpLink == nil {
+ s.HelpLink = NewString(SUPPORT_SETTINGS_DEFAULT_HELP_LINK)
+ }
+
+ if !IsSafeLink(s.ReportAProblemLink) {
+ *s.ReportAProblemLink = ""
+ }
+
+ if s.ReportAProblemLink == nil {
+ s.ReportAProblemLink = NewString(SUPPORT_SETTINGS_DEFAULT_REPORT_A_PROBLEM_LINK)
+ }
+
+ if s.SupportEmail == nil {
+ s.SupportEmail = NewString(SUPPORT_SETTINGS_DEFAULT_SUPPORT_EMAIL)
+ }
+}
+
+type AnnouncementSettings struct {
+ EnableBanner *bool
+ BannerText *string
+ BannerColor *string
+ BannerTextColor *string
+ AllowBannerDismissal *bool
+}
+
+func (s *AnnouncementSettings) SetDefaults() {
+ if s.EnableBanner == nil {
+ s.EnableBanner = NewBool(false)
+ }
+
+ if s.BannerText == nil {
+ s.BannerText = NewString("")
+ }
+
+ if s.BannerColor == nil {
+ s.BannerColor = NewString(ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_COLOR)
+ }
+
+ if s.BannerTextColor == nil {
+ s.BannerTextColor = NewString(ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_TEXT_COLOR)
+ }
+
+ if s.AllowBannerDismissal == nil {
+ s.AllowBannerDismissal = NewBool(true)
+ }
+}
+
+type ThemeSettings struct {
+ EnableThemeSelection *bool
+ DefaultTheme *string
+ AllowCustomThemes *bool
+ AllowedThemes []string
+}
+
+func (s *ThemeSettings) SetDefaults() {
+ if s.EnableThemeSelection == nil {
+ s.EnableThemeSelection = NewBool(true)
+ }
+
+ if s.DefaultTheme == nil {
+ s.DefaultTheme = NewString(TEAM_SETTINGS_DEFAULT_TEAM_TEXT)
+ }
+
+ if s.AllowCustomThemes == nil {
+ s.AllowCustomThemes = NewBool(true)
+ }
+
+ if s.AllowedThemes == nil {
+ s.AllowedThemes = []string{}
+ }
+}
+
+type TeamSettings struct {
+ SiteName string
+ MaxUsersPerTeam *int
+ EnableTeamCreation bool
+ EnableUserCreation bool
+ EnableOpenServer *bool
+ RestrictCreationToDomains string
+ EnableCustomBrand *bool
+ CustomBrandText *string
+ CustomDescriptionText *string
+ RestrictDirectMessage *string
+ RestrictTeamInvite *string
+ RestrictPublicChannelManagement *string
+ RestrictPrivateChannelManagement *string
+ RestrictPublicChannelCreation *string
+ RestrictPrivateChannelCreation *string
+ RestrictPublicChannelDeletion *string
+ RestrictPrivateChannelDeletion *string
+ RestrictPrivateChannelManageMembers *string
+ EnableXToLeaveChannelsFromLHS *bool
+ UserStatusAwayTimeout *int64
+ MaxChannelsPerTeam *int64
+ MaxNotificationsPerChannel *int64
+ EnableConfirmNotificationsToChannel *bool
+ TeammateNameDisplay *string
+ ExperimentalTownSquareIsReadOnly *bool
+ ExperimentalPrimaryTeam *string
+}
+
+func (s *TeamSettings) SetDefaults() {
+ if s.MaxUsersPerTeam == nil {
+ s.MaxUsersPerTeam = NewInt(TEAM_SETTINGS_DEFAULT_MAX_USERS_PER_TEAM)
+ }
+
+ if s.EnableCustomBrand == nil {
+ s.EnableCustomBrand = NewBool(false)
+ }
+
+ if s.CustomBrandText == nil {
+ s.CustomBrandText = NewString(TEAM_SETTINGS_DEFAULT_CUSTOM_BRAND_TEXT)
+ }
+
+ if s.CustomDescriptionText == nil {
+ s.CustomDescriptionText = NewString(TEAM_SETTINGS_DEFAULT_CUSTOM_DESCRIPTION_TEXT)
+ }
+
+ if s.EnableOpenServer == nil {
+ s.EnableOpenServer = NewBool(false)
+ }
+
+ if s.RestrictDirectMessage == nil {
+ s.RestrictDirectMessage = NewString(DIRECT_MESSAGE_ANY)
+ }
+
+ if s.RestrictTeamInvite == nil {
+ s.RestrictTeamInvite = NewString(PERMISSIONS_ALL)
+ }
+
+ if s.RestrictPublicChannelManagement == nil {
+ s.RestrictPublicChannelManagement = NewString(PERMISSIONS_ALL)
+ }
+
+ if s.RestrictPrivateChannelManagement == nil {
+ s.RestrictPrivateChannelManagement = NewString(PERMISSIONS_ALL)
+ }
+
+ if s.RestrictPublicChannelCreation == nil {
+ s.RestrictPublicChannelCreation = new(string)
+ // If this setting does not exist, assume migration from <3.6, so use management setting as default.
+ if *s.RestrictPublicChannelManagement == PERMISSIONS_CHANNEL_ADMIN {
+ *s.RestrictPublicChannelCreation = PERMISSIONS_TEAM_ADMIN
+ } else {
+ *s.RestrictPublicChannelCreation = *s.RestrictPublicChannelManagement
+ }
+ }
+
+ if s.RestrictPrivateChannelCreation == nil {
+ // If this setting does not exist, assume migration from <3.6, so use management setting as default.
+ if *s.RestrictPrivateChannelManagement == PERMISSIONS_CHANNEL_ADMIN {
+ s.RestrictPrivateChannelCreation = NewString(PERMISSIONS_TEAM_ADMIN)
+ } else {
+ s.RestrictPrivateChannelCreation = NewString(*s.RestrictPrivateChannelManagement)
+ }
+ }
+
+ if s.RestrictPublicChannelDeletion == nil {
+ // If this setting does not exist, assume migration from <3.6, so use management setting as default.
+ s.RestrictPublicChannelDeletion = NewString(*s.RestrictPublicChannelManagement)
+ }
+
+ if s.RestrictPrivateChannelDeletion == nil {
+ // If this setting does not exist, assume migration from <3.6, so use management setting as default.
+ s.RestrictPrivateChannelDeletion = NewString(*s.RestrictPrivateChannelManagement)
+ }
+
+ if s.RestrictPrivateChannelManageMembers == nil {
+ s.RestrictPrivateChannelManageMembers = NewString(PERMISSIONS_ALL)
+ }
+
+ if s.EnableXToLeaveChannelsFromLHS == nil {
+ s.EnableXToLeaveChannelsFromLHS = NewBool(false)
+ }
+
+ if s.UserStatusAwayTimeout == nil {
+ s.UserStatusAwayTimeout = NewInt64(TEAM_SETTINGS_DEFAULT_USER_STATUS_AWAY_TIMEOUT)
+ }
+
+ if s.MaxChannelsPerTeam == nil {
+ s.MaxChannelsPerTeam = NewInt64(2000)
+ }
+
+ if s.MaxNotificationsPerChannel == nil {
+ s.MaxNotificationsPerChannel = NewInt64(1000)
+ }
+
+ if s.EnableConfirmNotificationsToChannel == nil {
+ s.EnableConfirmNotificationsToChannel = NewBool(true)
+ }
+
+ if s.ExperimentalTownSquareIsReadOnly == nil {
+ s.ExperimentalTownSquareIsReadOnly = NewBool(false)
+ }
+
+ if s.ExperimentalPrimaryTeam == nil {
+ s.ExperimentalPrimaryTeam = NewString("")
+ }
+}
+
+type ClientRequirements struct {
+ AndroidLatestVersion string
+ AndroidMinVersion string
+ DesktopLatestVersion string
+ DesktopMinVersion string
+ IosLatestVersion string
+ IosMinVersion string
+}
+
+type LdapSettings struct {
+ // Basic
+ Enable *bool
+ EnableSync *bool
+ LdapServer *string
+ LdapPort *int
+ ConnectionSecurity *string
+ BaseDN *string
+ BindUsername *string
+ BindPassword *string
+
+ // Filtering
+ UserFilter *string
+
+ // User Mapping
+ FirstNameAttribute *string
+ LastNameAttribute *string
+ EmailAttribute *string
+ UsernameAttribute *string
+ NicknameAttribute *string
+ IdAttribute *string
+ PositionAttribute *string
+
+ // Synchronization
+ SyncIntervalMinutes *int
+
+ // Advanced
+ SkipCertificateVerification *bool
+ QueryTimeout *int
+ MaxPageSize *int
+
+ // Customization
+ LoginFieldName *string
+
+ LoginButtonColor *string
+ LoginButtonBorderColor *string
+ LoginButtonTextColor *string
+}
+
+func (s *LdapSettings) SetDefaults() {
+ if s.Enable == nil {
+ s.Enable = NewBool(false)
+ }
+
+ // When unset, this defaults to the value of the LDAP Enable setting.
+ if s.EnableSync == nil {
+ s.EnableSync = NewBool(*s.Enable)
+ }
+
+ if s.LdapServer == nil {
+ s.LdapServer = NewString("")
+ }
+
+ if s.LdapPort == nil {
+ s.LdapPort = NewInt(389)
+ }
+
+ if s.ConnectionSecurity == nil {
+ s.ConnectionSecurity = NewString("")
+ }
+
+ if s.BaseDN == nil {
+ s.BaseDN = NewString("")
+ }
+
+ if s.BindUsername == nil {
+ s.BindUsername = NewString("")
+ }
+
+ if s.BindPassword == nil {
+ s.BindPassword = NewString("")
+ }
+
+ if s.UserFilter == nil {
+ s.UserFilter = NewString("")
+ }
+
+ if s.FirstNameAttribute == nil {
+ s.FirstNameAttribute = NewString(LDAP_SETTINGS_DEFAULT_FIRST_NAME_ATTRIBUTE)
+ }
+
+ if s.LastNameAttribute == nil {
+ s.LastNameAttribute = NewString(LDAP_SETTINGS_DEFAULT_LAST_NAME_ATTRIBUTE)
+ }
+
+ if s.EmailAttribute == nil {
+ s.EmailAttribute = NewString(LDAP_SETTINGS_DEFAULT_EMAIL_ATTRIBUTE)
+ }
+
+ if s.UsernameAttribute == nil {
+ s.UsernameAttribute = NewString(LDAP_SETTINGS_DEFAULT_USERNAME_ATTRIBUTE)
+ }
+
+ if s.NicknameAttribute == nil {
+ s.NicknameAttribute = NewString(LDAP_SETTINGS_DEFAULT_NICKNAME_ATTRIBUTE)
+ }
+
+ if s.IdAttribute == nil {
+ s.IdAttribute = NewString(LDAP_SETTINGS_DEFAULT_ID_ATTRIBUTE)
+ }
+
+ if s.PositionAttribute == nil {
+ s.PositionAttribute = NewString(LDAP_SETTINGS_DEFAULT_POSITION_ATTRIBUTE)
+ }
+
+ if s.SyncIntervalMinutes == nil {
+ s.SyncIntervalMinutes = NewInt(60)
+ }
+
+ if s.SkipCertificateVerification == nil {
+ s.SkipCertificateVerification = NewBool(false)
+ }
+
+ if s.QueryTimeout == nil {
+ s.QueryTimeout = NewInt(60)
+ }
+
+ if s.MaxPageSize == nil {
+ s.MaxPageSize = NewInt(0)
+ }
+
+ if s.LoginFieldName == nil {
+ s.LoginFieldName = NewString(LDAP_SETTINGS_DEFAULT_LOGIN_FIELD_NAME)
+ }
+
+ if s.LoginButtonColor == nil {
+ s.LoginButtonColor = NewString("#0000")
+ }
+
+ if s.LoginButtonBorderColor == nil {
+ s.LoginButtonBorderColor = NewString("#2389D7")
+ }
+
+ if s.LoginButtonTextColor == nil {
+ s.LoginButtonTextColor = NewString("#2389D7")
+ }
+}
+
+type ComplianceSettings struct {
+ Enable *bool
+ Directory *string
+ EnableDaily *bool
+}
+
+func (s *ComplianceSettings) SetDefaults() {
+ if s.Enable == nil {
+ s.Enable = NewBool(false)
+ }
+
+ if s.Directory == nil {
+ s.Directory = NewString("./data/")
+ }
+
+ if s.EnableDaily == nil {
+ s.EnableDaily = NewBool(false)
+ }
+}
+
+type LocalizationSettings struct {
+ DefaultServerLocale *string
+ DefaultClientLocale *string
+ AvailableLocales *string
+}
+
+func (s *LocalizationSettings) SetDefaults() {
+ if s.DefaultServerLocale == nil {
+ s.DefaultServerLocale = NewString(DEFAULT_LOCALE)
+ }
+
+ if s.DefaultClientLocale == nil {
+ s.DefaultClientLocale = NewString(DEFAULT_LOCALE)
+ }
+
+ if s.AvailableLocales == nil {
+ s.AvailableLocales = NewString("")
+ }
+}
+
+type SamlSettings struct {
+ // Basic
+ Enable *bool
+ EnableSyncWithLdap *bool
+
+ Verify *bool
+ Encrypt *bool
+
+ IdpUrl *string
+ IdpDescriptorUrl *string
+ AssertionConsumerServiceURL *string
+
+ IdpCertificateFile *string
+ PublicCertificateFile *string
+ PrivateKeyFile *string
+
+ // User Mapping
+ FirstNameAttribute *string
+ LastNameAttribute *string
+ EmailAttribute *string
+ UsernameAttribute *string
+ NicknameAttribute *string
+ LocaleAttribute *string
+ PositionAttribute *string
+
+ LoginButtonText *string
+
+ LoginButtonColor *string
+ LoginButtonBorderColor *string
+ LoginButtonTextColor *string
+}
+
+func (s *SamlSettings) SetDefaults() {
+ if s.Enable == nil {
+ s.Enable = NewBool(false)
+ }
+
+ if s.EnableSyncWithLdap == nil {
+ s.EnableSyncWithLdap = NewBool(false)
+ }
+
+ if s.Verify == nil {
+ s.Verify = NewBool(true)
+ }
+
+ if s.Encrypt == nil {
+ s.Encrypt = NewBool(true)
+ }
+
+ if s.IdpUrl == nil {
+ s.IdpUrl = NewString("")
+ }
+
+ if s.IdpDescriptorUrl == nil {
+ s.IdpDescriptorUrl = NewString("")
+ }
+
+ if s.IdpCertificateFile == nil {
+ s.IdpCertificateFile = NewString("")
+ }
+
+ if s.PublicCertificateFile == nil {
+ s.PublicCertificateFile = NewString("")
+ }
+
+ if s.PrivateKeyFile == nil {
+ s.PrivateKeyFile = NewString("")
+ }
+
+ if s.AssertionConsumerServiceURL == nil {
+ s.AssertionConsumerServiceURL = NewString("")
+ }
+
+ if s.LoginButtonText == nil || *s.LoginButtonText == "" {
+ s.LoginButtonText = NewString(USER_AUTH_SERVICE_SAML_TEXT)
+ }
+
+ if s.FirstNameAttribute == nil {
+ s.FirstNameAttribute = NewString(SAML_SETTINGS_DEFAULT_FIRST_NAME_ATTRIBUTE)
+ }
+
+ if s.LastNameAttribute == nil {
+ s.LastNameAttribute = NewString(SAML_SETTINGS_DEFAULT_LAST_NAME_ATTRIBUTE)
+ }
+
+ if s.EmailAttribute == nil {
+ s.EmailAttribute = NewString(SAML_SETTINGS_DEFAULT_EMAIL_ATTRIBUTE)
+ }
+
+ if s.UsernameAttribute == nil {
+ s.UsernameAttribute = NewString(SAML_SETTINGS_DEFAULT_USERNAME_ATTRIBUTE)
+ }
+
+ if s.NicknameAttribute == nil {
+ s.NicknameAttribute = NewString(SAML_SETTINGS_DEFAULT_NICKNAME_ATTRIBUTE)
+ }
+
+ if s.PositionAttribute == nil {
+ s.PositionAttribute = NewString(SAML_SETTINGS_DEFAULT_POSITION_ATTRIBUTE)
+ }
+
+ if s.LocaleAttribute == nil {
+ s.LocaleAttribute = NewString(SAML_SETTINGS_DEFAULT_LOCALE_ATTRIBUTE)
+ }
+
+ if s.LoginButtonColor == nil {
+ s.LoginButtonColor = NewString("#34a28b")
+ }
+
+ if s.LoginButtonBorderColor == nil {
+ s.LoginButtonBorderColor = NewString("#2389D7")
+ }
+
+ if s.LoginButtonTextColor == nil {
+ s.LoginButtonTextColor = NewString("#ffffff")
+ }
+}
+
+type NativeAppSettings struct {
+ AppDownloadLink *string
+ AndroidAppDownloadLink *string
+ IosAppDownloadLink *string
+}
+
+func (s *NativeAppSettings) SetDefaults() {
+ if s.AppDownloadLink == nil {
+ s.AppDownloadLink = NewString(NATIVEAPP_SETTINGS_DEFAULT_APP_DOWNLOAD_LINK)
+ }
+
+ if s.AndroidAppDownloadLink == nil {
+ s.AndroidAppDownloadLink = NewString(NATIVEAPP_SETTINGS_DEFAULT_ANDROID_APP_DOWNLOAD_LINK)
+ }
+
+ if s.IosAppDownloadLink == nil {
+ s.IosAppDownloadLink = NewString(NATIVEAPP_SETTINGS_DEFAULT_IOS_APP_DOWNLOAD_LINK)
+ }
+}
+
+type WebrtcSettings struct {
+ Enable *bool
+ GatewayWebsocketUrl *string
+ GatewayAdminUrl *string
+ GatewayAdminSecret *string
+ StunURI *string
+ TurnURI *string
+ TurnUsername *string
+ TurnSharedKey *string
+}
+
+func (s *WebrtcSettings) SetDefaults() {
+ if s.Enable == nil {
+ s.Enable = NewBool(false)
+ }
+
+ if s.GatewayWebsocketUrl == nil {
+ s.GatewayWebsocketUrl = NewString("")
+ }
+
+ if s.GatewayAdminUrl == nil {
+ s.GatewayAdminUrl = NewString("")
+ }
+
+ if s.GatewayAdminSecret == nil {
+ s.GatewayAdminSecret = NewString("")
+ }
+
+ if s.StunURI == nil {
+ s.StunURI = NewString(WEBRTC_SETTINGS_DEFAULT_STUN_URI)
+ }
+
+ if s.TurnURI == nil {
+ s.TurnURI = NewString(WEBRTC_SETTINGS_DEFAULT_TURN_URI)
+ }
+
+ if s.TurnUsername == nil {
+ s.TurnUsername = NewString("")
+ }
+
+ if s.TurnSharedKey == nil {
+ s.TurnSharedKey = NewString("")
+ }
+}
+
+type ElasticsearchSettings struct {
+ ConnectionUrl *string
+ Username *string
+ Password *string
+ EnableIndexing *bool
+ EnableSearching *bool
+ Sniff *bool
+ PostIndexReplicas *int
+ PostIndexShards *int
+ AggregatePostsAfterDays *int
+ PostsAggregatorJobStartTime *string
+ IndexPrefix *string
+ LiveIndexingBatchSize *int
+ BulkIndexingTimeWindowSeconds *int
+ RequestTimeoutSeconds *int
+}
+
+func (s *ElasticsearchSettings) SetDefaults() {
+ if s.ConnectionUrl == nil {
+ s.ConnectionUrl = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_CONNECTION_URL)
+ }
+
+ if s.Username == nil {
+ s.Username = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_USERNAME)
+ }
+
+ if s.Password == nil {
+ s.Password = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_PASSWORD)
+ }
+
+ if s.EnableIndexing == nil {
+ s.EnableIndexing = NewBool(false)
+ }
+
+ if s.EnableSearching == nil {
+ s.EnableSearching = NewBool(false)
+ }
+
+ if s.Sniff == nil {
+ s.Sniff = NewBool(true)
+ }
+
+ if s.PostIndexReplicas == nil {
+ s.PostIndexReplicas = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_REPLICAS)
+ }
+
+ if s.PostIndexShards == nil {
+ s.PostIndexShards = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_SHARDS)
+ }
+
+ if s.AggregatePostsAfterDays == nil {
+ s.AggregatePostsAfterDays = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_AGGREGATE_POSTS_AFTER_DAYS)
+ }
+
+ if s.PostsAggregatorJobStartTime == nil {
+ s.PostsAggregatorJobStartTime = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_POSTS_AGGREGATOR_JOB_START_TIME)
+ }
+
+ if s.IndexPrefix == nil {
+ s.IndexPrefix = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_INDEX_PREFIX)
+ }
+
+ if s.LiveIndexingBatchSize == nil {
+ s.LiveIndexingBatchSize = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_LIVE_INDEXING_BATCH_SIZE)
+ }
+
+ if s.BulkIndexingTimeWindowSeconds == nil {
+ s.BulkIndexingTimeWindowSeconds = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_BULK_INDEXING_TIME_WINDOW_SECONDS)
+ }
+
+ if s.RequestTimeoutSeconds == nil {
+ s.RequestTimeoutSeconds = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_REQUEST_TIMEOUT_SECONDS)
+ }
+}
+
+type DataRetentionSettings struct {
+ EnableMessageDeletion *bool
+ EnableFileDeletion *bool
+ MessageRetentionDays *int
+ FileRetentionDays *int
+ DeletionJobStartTime *string
+}
+
+func (s *DataRetentionSettings) SetDefaults() {
+ if s.EnableMessageDeletion == nil {
+ s.EnableMessageDeletion = NewBool(false)
+ }
+
+ if s.EnableFileDeletion == nil {
+ s.EnableFileDeletion = NewBool(false)
+ }
+
+ if s.MessageRetentionDays == nil {
+ s.MessageRetentionDays = NewInt(DATA_RETENTION_SETTINGS_DEFAULT_MESSAGE_RETENTION_DAYS)
+ }
+
+ if s.FileRetentionDays == nil {
+ s.FileRetentionDays = NewInt(DATA_RETENTION_SETTINGS_DEFAULT_FILE_RETENTION_DAYS)
+ }
+
+ if s.DeletionJobStartTime == nil {
+ s.DeletionJobStartTime = NewString(DATA_RETENTION_SETTINGS_DEFAULT_DELETION_JOB_START_TIME)
+ }
+}
+
+type JobSettings struct {
+ RunJobs *bool
+ RunScheduler *bool
+}
+
+func (s *JobSettings) SetDefaults() {
+ if s.RunJobs == nil {
+ s.RunJobs = NewBool(true)
+ }
+
+ if s.RunScheduler == nil {
+ s.RunScheduler = NewBool(true)
+ }
+}
+
+type PluginState struct {
+ Enable bool
+}
+
+type PluginSettings struct {
+ Enable *bool
+ EnableUploads *bool
+ Directory *string
+ ClientDirectory *string
+ Plugins map[string]interface{}
+ PluginStates map[string]*PluginState
+}
+
+func (s *PluginSettings) SetDefaults() {
+ if s.Enable == nil {
+ s.Enable = NewBool(true)
+ }
+
+ if s.EnableUploads == nil {
+ s.EnableUploads = NewBool(false)
+ }
+
+ if s.Directory == nil {
+ s.Directory = NewString(PLUGIN_SETTINGS_DEFAULT_DIRECTORY)
+ }
+
+ if *s.Directory == "" {
+ *s.Directory = PLUGIN_SETTINGS_DEFAULT_DIRECTORY
+ }
+
+ if s.ClientDirectory == nil {
+ s.ClientDirectory = NewString(PLUGIN_SETTINGS_DEFAULT_CLIENT_DIRECTORY)
+ }
+
+ if *s.ClientDirectory == "" {
+ *s.ClientDirectory = PLUGIN_SETTINGS_DEFAULT_CLIENT_DIRECTORY
+ }
+
+ if s.Plugins == nil {
+ s.Plugins = make(map[string]interface{})
+ }
+
+ if s.PluginStates == nil {
+ s.PluginStates = make(map[string]*PluginState)
+ }
+}
+
+type MessageExportSettings struct {
+ EnableExport *bool
+ ExportFormat *string
+ DailyRunTime *string
+ ExportFromTimestamp *int64
+ BatchSize *int
+
+ // Formatter-specific settings. These are only expected to be non-nil if ExportFormat is set to the associated format.
+ GlobalRelayEmailAddress *string
+}
+
+func (s *MessageExportSettings) SetDefaults() {
+ if s.EnableExport == nil {
+ s.EnableExport = NewBool(false)
+ }
+
+ if s.ExportFormat == nil {
+ s.ExportFormat = NewString(COMPLIANCE_EXPORT_TYPE_ACTIANCE)
+ }
+
+ if s.DailyRunTime == nil {
+ s.DailyRunTime = NewString("01:00")
+ }
+
+ if s.ExportFromTimestamp == nil {
+ s.ExportFromTimestamp = NewInt64(0)
+ }
+
+ if s.EnableExport != nil && *s.EnableExport && *s.ExportFromTimestamp == int64(0) {
+ // when the feature is enabled via the System Console, use the current timestamp as the start time for future exports
+ s.ExportFromTimestamp = NewInt64(GetMillis())
+ } else if s.EnableExport != nil && !*s.EnableExport {
+ // when the feature is disabled, reset the timestamp so that the timestamp will be set if the feature is re-enabled
+ s.ExportFromTimestamp = NewInt64(0)
+ }
+
+ if s.BatchSize == nil {
+ s.BatchSize = NewInt(10000)
+ }
+}
+
+type ConfigFunc func() *Config
+
+type Config struct {
+ ServiceSettings ServiceSettings
+ TeamSettings TeamSettings
+ ClientRequirements ClientRequirements
+ SqlSettings SqlSettings
+ LogSettings LogSettings
+ PasswordSettings PasswordSettings
+ FileSettings FileSettings
+ EmailSettings EmailSettings
+ RateLimitSettings RateLimitSettings
+ PrivacySettings PrivacySettings
+ SupportSettings SupportSettings
+ AnnouncementSettings AnnouncementSettings
+ ThemeSettings ThemeSettings
+ GitLabSettings SSOSettings
+ GoogleSettings SSOSettings
+ Office365Settings SSOSettings
+ LdapSettings LdapSettings
+ ComplianceSettings ComplianceSettings
+ LocalizationSettings LocalizationSettings
+ SamlSettings SamlSettings
+ NativeAppSettings NativeAppSettings
+ ClusterSettings ClusterSettings
+ MetricsSettings MetricsSettings
+ AnalyticsSettings AnalyticsSettings
+ WebrtcSettings WebrtcSettings
+ ElasticsearchSettings ElasticsearchSettings
+ DataRetentionSettings DataRetentionSettings
+ MessageExportSettings MessageExportSettings
+ JobSettings JobSettings
+ PluginSettings PluginSettings
+}
+
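+// Clone returns a deep copy of the config, produced by serializing it to JSON and back.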
+func (o *Config) Clone() *Config {
+ var ret Config
+ if err := json.Unmarshal([]byte(o.ToJson()), &ret); err != nil {
+ panic(err)
+ }
+ return &ret
+}
+
+func (o *Config) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func (o *Config) GetSSOService(service string) *SSOSettings {
+ switch service {
+ case SERVICE_GITLAB:
+ return &o.GitLabSettings
+ case SERVICE_GOOGLE:
+ return &o.GoogleSettings
+ case SERVICE_OFFICE365:
+ return &o.Office365Settings
+ }
+
+ return nil
+}
+
+func ConfigFromJson(data io.Reader) *Config {
+ var o *Config
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func (o *Config) SetDefaults() {
+ o.LdapSettings.SetDefaults()
+ o.SamlSettings.SetDefaults()
+
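+ // LDAP and SAML defaults are applied first because the TeammateNameDisplay default below depends on them.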
+ if o.TeamSettings.TeammateNameDisplay == nil {
+ o.TeamSettings.TeammateNameDisplay = NewString(SHOW_USERNAME)
+
+ if *o.SamlSettings.Enable || *o.LdapSettings.Enable {
+ *o.TeamSettings.TeammateNameDisplay = SHOW_FULLNAME
+ }
+ }
+
+ o.SqlSettings.SetDefaults()
+ o.FileSettings.SetDefaults()
+ o.EmailSettings.SetDefaults()
+ o.ServiceSettings.SetDefaults()
+ o.PasswordSettings.SetDefaults()
+ o.TeamSettings.SetDefaults()
+ o.MetricsSettings.SetDefaults()
+ o.SupportSettings.SetDefaults()
+ o.AnnouncementSettings.SetDefaults()
+ o.ThemeSettings.SetDefaults()
+ o.ClusterSettings.SetDefaults()
+ o.PluginSettings.SetDefaults()
+ o.AnalyticsSettings.SetDefaults()
+ o.ComplianceSettings.SetDefaults()
+ o.LocalizationSettings.SetDefaults()
+ o.ElasticsearchSettings.SetDefaults()
+ o.NativeAppSettings.SetDefaults()
+ o.DataRetentionSettings.SetDefaults()
+ o.RateLimitSettings.SetDefaults()
+ o.LogSettings.SetDefaults()
+ o.JobSettings.SetDefaults()
+ o.WebrtcSettings.SetDefaults()
+ o.MessageExportSettings.SetDefaults()
+}
+
+func (o *Config) IsValid() *AppError {
+ if len(*o.ServiceSettings.SiteURL) == 0 && *o.EmailSettings.EnableEmailBatching {
+ return NewAppError("Config.IsValid", "model.config.is_valid.site_url_email_batching.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *o.ClusterSettings.Enable && *o.EmailSettings.EnableEmailBatching {
+ return NewAppError("Config.IsValid", "model.config.is_valid.cluster_email_batching.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if err := o.TeamSettings.isValid(); err != nil {
+ return err
+ }
+
+ if err := o.SqlSettings.isValid(); err != nil {
+ return err
+ }
+
+ if err := o.FileSettings.isValid(); err != nil {
+ return err
+ }
+
+ if err := o.EmailSettings.isValid(); err != nil {
+ return err
+ }
+
+ if err := o.LdapSettings.isValid(); err != nil {
+ return err
+ }
+
+ if err := o.SamlSettings.isValid(); err != nil {
+ return err
+ }
+
+ if *o.PasswordSettings.MinimumLength < PASSWORD_MINIMUM_LENGTH || *o.PasswordSettings.MinimumLength > PASSWORD_MAXIMUM_LENGTH {
+ return NewAppError("Config.IsValid", "model.config.is_valid.password_length.app_error", map[string]interface{}{"MinLength": PASSWORD_MINIMUM_LENGTH, "MaxLength": PASSWORD_MAXIMUM_LENGTH}, "", http.StatusBadRequest)
+ }
+
+ if err := o.RateLimitSettings.isValid(); err != nil {
+ return err
+ }
+
+ if err := o.WebrtcSettings.isValid(); err != nil {
+ return err
+ }
+
+ if err := o.ServiceSettings.isValid(); err != nil {
+ return err
+ }
+
+ if err := o.ElasticsearchSettings.isValid(); err != nil {
+ return err
+ }
+
+ if err := o.DataRetentionSettings.isValid(); err != nil {
+ return err
+ }
+
+ if err := o.LocalizationSettings.isValid(); err != nil {
+ return err
+ }
+
+ if err := o.MessageExportSettings.isValid(o.FileSettings); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (ts *TeamSettings) isValid() *AppError {
+ if *ts.MaxUsersPerTeam <= 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.max_users.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *ts.MaxChannelsPerTeam <= 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.max_channels.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *ts.MaxNotificationsPerChannel <= 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.max_notify_per_channel.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if !(*ts.RestrictDirectMessage == DIRECT_MESSAGE_ANY || *ts.RestrictDirectMessage == DIRECT_MESSAGE_TEAM) {
+ return NewAppError("Config.IsValid", "model.config.is_valid.restrict_direct_message.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if !(*ts.TeammateNameDisplay == SHOW_FULLNAME || *ts.TeammateNameDisplay == SHOW_NICKNAME_FULLNAME || *ts.TeammateNameDisplay == SHOW_USERNAME) {
+ return NewAppError("Config.IsValid", "model.config.is_valid.teammate_name_display.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(ts.SiteName) > SITENAME_MAX_LENGTH {
+ return NewAppError("Config.IsValid", "model.config.is_valid.sitename_length.app_error", map[string]interface{}{"MaxLength": SITENAME_MAX_LENGTH}, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (ss *SqlSettings) isValid() *AppError {
+ if len(ss.AtRestEncryptKey) < 32 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.encrypt_sql.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if !(*ss.DriverName == DATABASE_DRIVER_MYSQL || *ss.DriverName == DATABASE_DRIVER_POSTGRES) {
+ return NewAppError("Config.IsValid", "model.config.is_valid.sql_driver.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *ss.MaxIdleConns <= 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.sql_idle.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *ss.QueryTimeout <= 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.sql_query_timeout.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(*ss.DataSource) == 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.sql_data_src.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *ss.MaxOpenConns <= 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.sql_max_conn.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (fs *FileSettings) isValid() *AppError {
+ if *fs.MaxFileSize <= 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.max_file_size.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if !(*fs.DriverName == IMAGE_DRIVER_LOCAL || *fs.DriverName == IMAGE_DRIVER_S3) {
+ return NewAppError("Config.IsValid", "model.config.is_valid.file_driver.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(*fs.PublicLinkSalt) < 32 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.file_salt.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (es *EmailSettings) isValid() *AppError {
+ if !(es.ConnectionSecurity == CONN_SECURITY_NONE || es.ConnectionSecurity == CONN_SECURITY_TLS || es.ConnectionSecurity == CONN_SECURITY_STARTTLS || es.ConnectionSecurity == CONN_SECURITY_PLAIN) {
+ return NewAppError("Config.IsValid", "model.config.is_valid.email_security.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(es.InviteSalt) < 32 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.email_salt.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *es.EmailBatchingBufferSize <= 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.email_batching_buffer_size.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *es.EmailBatchingInterval < 30 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.email_batching_interval.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if !(*es.EmailNotificationContentsType == EMAIL_NOTIFICATION_CONTENTS_FULL || *es.EmailNotificationContentsType == EMAIL_NOTIFICATION_CONTENTS_GENERIC) {
+ return NewAppError("Config.IsValid", "model.config.is_valid.email_notification_contents_type.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (rls *RateLimitSettings) isValid() *AppError {
+ if *rls.MemoryStoreSize <= 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.rate_mem.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *rls.PerSec <= 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.rate_sec.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *rls.MaxBurst <= 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.max_burst.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (ls *LdapSettings) isValid() *AppError {
+ if !(*ls.ConnectionSecurity == CONN_SECURITY_NONE || *ls.ConnectionSecurity == CONN_SECURITY_TLS || *ls.ConnectionSecurity == CONN_SECURITY_STARTTLS) {
+ return NewAppError("Config.IsValid", "model.config.is_valid.ldap_security.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *ls.SyncIntervalMinutes <= 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.ldap_sync_interval.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *ls.MaxPageSize < 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.ldap_max_page_size.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *ls.Enable {
+ if *ls.LdapServer == "" {
+ return NewAppError("Config.IsValid", "model.config.is_valid.ldap_server", nil, "", http.StatusBadRequest)
+ }
+
+ if *ls.BaseDN == "" {
+ return NewAppError("Config.IsValid", "model.config.is_valid.ldap_basedn", nil, "", http.StatusBadRequest)
+ }
+
+ if *ls.EmailAttribute == "" {
+ return NewAppError("Config.IsValid", "model.config.is_valid.ldap_email", nil, "", http.StatusBadRequest)
+ }
+
+ if *ls.UsernameAttribute == "" {
+ return NewAppError("Config.IsValid", "model.config.is_valid.ldap_username", nil, "", http.StatusBadRequest)
+ }
+
+ if *ls.IdAttribute == "" {
+ return NewAppError("Config.IsValid", "model.config.is_valid.ldap_id", nil, "", http.StatusBadRequest)
+ }
+ }
+
+ return nil
+}
+
+func (ss *SamlSettings) isValid() *AppError {
+ if *ss.Enable {
+ if len(*ss.IdpUrl) == 0 || !IsValidHttpUrl(*ss.IdpUrl) {
+ return NewAppError("Config.IsValid", "model.config.is_valid.saml_idp_url.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(*ss.IdpDescriptorUrl) == 0 || !IsValidHttpUrl(*ss.IdpDescriptorUrl) {
+ return NewAppError("Config.IsValid", "model.config.is_valid.saml_idp_descriptor_url.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(*ss.IdpCertificateFile) == 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.saml_idp_cert.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(*ss.EmailAttribute) == 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.saml_email_attribute.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(*ss.UsernameAttribute) == 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.saml_username_attribute.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *ss.Verify {
+ if len(*ss.AssertionConsumerServiceURL) == 0 || !IsValidHttpUrl(*ss.AssertionConsumerServiceURL) {
+ return NewAppError("Config.IsValid", "model.config.is_valid.saml_assertion_consumer_service_url.app_error", nil, "", http.StatusBadRequest)
+ }
+ }
+
+ if *ss.Encrypt {
+ if len(*ss.PrivateKeyFile) == 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.saml_private_key.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(*ss.PublicCertificateFile) == 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.saml_public_cert.app_error", nil, "", http.StatusBadRequest)
+ }
+ }
+ }
+
+ return nil
+}
+
+func (ws *WebrtcSettings) isValid() *AppError {
+ if *ws.Enable {
+ if len(*ws.GatewayWebsocketUrl) == 0 || !IsValidWebsocketUrl(*ws.GatewayWebsocketUrl) {
+ return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_gateway_ws_url.app_error", nil, "", http.StatusBadRequest)
+ } else if len(*ws.GatewayAdminUrl) == 0 || !IsValidHttpUrl(*ws.GatewayAdminUrl) {
+ return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_gateway_admin_url.app_error", nil, "", http.StatusBadRequest)
+ } else if len(*ws.GatewayAdminSecret) == 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_gateway_admin_secret.app_error", nil, "", http.StatusBadRequest)
+ } else if len(*ws.StunURI) != 0 && !IsValidTurnOrStunServer(*ws.StunURI) {
+ return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_stun_uri.app_error", nil, "", http.StatusBadRequest)
+ } else if len(*ws.TurnURI) != 0 {
+ if !IsValidTurnOrStunServer(*ws.TurnURI) {
+ return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_turn_uri.app_error", nil, "", http.StatusBadRequest)
+ }
+ if len(*ws.TurnUsername) == 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_turn_username.app_error", nil, "", http.StatusBadRequest)
+ } else if len(*ws.TurnSharedKey) == 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_turn_shared_key.app_error", nil, "", http.StatusBadRequest)
+ }
+ }
+ }
+
+ return nil
+}
+
+func (ss *ServiceSettings) isValid() *AppError {
+ if !(*ss.ConnectionSecurity == CONN_SECURITY_NONE || *ss.ConnectionSecurity == CONN_SECURITY_TLS) {
+ return NewAppError("Config.IsValid", "model.config.is_valid.webserver_security.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *ss.ReadTimeout <= 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.read_timeout.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *ss.WriteTimeout <= 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.write_timeout.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *ss.TimeBetweenUserTypingUpdatesMilliseconds < 1000 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.time_between_user_typing.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *ss.MaximumLoginAttempts <= 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.login_attempts.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(*ss.SiteURL) != 0 {
+ if _, err := url.ParseRequestURI(*ss.SiteURL); err != nil {
+ return NewAppError("Config.IsValid", "model.config.is_valid.site_url.app_error", nil, "", http.StatusBadRequest)
+ }
+ }
+
+ if len(*ss.ListenAddress) == 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.listen_address.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *ss.ExperimentalGroupUnreadChannels != GROUP_UNREAD_CHANNELS_DISABLED &&
+ *ss.ExperimentalGroupUnreadChannels != GROUP_UNREAD_CHANNELS_DEFAULT_ON &&
+ *ss.ExperimentalGroupUnreadChannels != GROUP_UNREAD_CHANNELS_DEFAULT_OFF {
+ return NewAppError("Config.IsValid", "model.config.is_valid.group_unread_channels.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ switch *ss.ImageProxyType {
+ case "", "willnorris/imageproxy":
+ case "atmos/camo":
+ if *ss.ImageProxyOptions == "" {
+ return NewAppError("Config.IsValid", "model.config.is_valid.atmos_camo_image_proxy_options.app_error", nil, "", http.StatusBadRequest)
+ }
+ default:
+ return NewAppError("Config.IsValid", "model.config.is_valid.image_proxy_type.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
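+
+// Illustrative fragment (editor's sketch, values hypothetical): the switch above
+// means "willnorris/imageproxy" needs no extra options, while "atmos/camo"
+// requires ImageProxyOptions to be set (typically the camo shared key), e.g. in
+// config.json:
+//
+//   "ImageProxyType": "atmos/camo",
+//   "ImageProxyOptions": "somesharedkey"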
+
+func (ess *ElasticsearchSettings) isValid() *AppError {
+ if *ess.EnableIndexing {
+ if len(*ess.ConnectionUrl) == 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.connection_url.app_error", nil, "", http.StatusBadRequest)
+ }
+ }
+
+ if *ess.EnableSearching && !*ess.EnableIndexing {
+ return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.enable_searching.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *ess.AggregatePostsAfterDays < 1 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.aggregate_posts_after_days.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if _, err := time.Parse("15:04", *ess.PostsAggregatorJobStartTime); err != nil {
+ return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.posts_aggregator_job_start_time.app_error", nil, err.Error(), http.StatusBadRequest)
+ }
+
+ if *ess.LiveIndexingBatchSize < 1 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.live_indexing_batch_size.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *ess.BulkIndexingTimeWindowSeconds < 1 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.bulk_indexing_time_window_seconds.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *ess.RequestTimeoutSeconds < 1 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.request_timeout_seconds.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (drs *DataRetentionSettings) isValid() *AppError {
+ if *drs.MessageRetentionDays <= 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.data_retention.message_retention_days_too_low.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *drs.FileRetentionDays <= 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.data_retention.file_retention_days_too_low.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if _, err := time.Parse("15:04", *drs.DeletionJobStartTime); err != nil {
+ return NewAppError("Config.IsValid", "model.config.is_valid.data_retention.deletion_job_start_time.app_error", nil, err.Error(), http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (ls *LocalizationSettings) isValid() *AppError {
+ if len(*ls.AvailableLocales) > 0 {
+ if !strings.Contains(*ls.AvailableLocales, *ls.DefaultClientLocale) {
+ return NewAppError("Config.IsValid", "model.config.is_valid.localization.available_locales.app_error", nil, "", http.StatusBadRequest)
+ }
+ }
+
+ return nil
+}
+
+func (mes *MessageExportSettings) isValid(fs FileSettings) *AppError {
+ if mes.EnableExport == nil {
+ return NewAppError("Config.IsValid", "model.config.is_valid.message_export.enable.app_error", nil, "", http.StatusBadRequest)
+ }
+ if *mes.EnableExport {
+ if mes.ExportFromTimestamp == nil || *mes.ExportFromTimestamp < 0 || *mes.ExportFromTimestamp > GetMillis() {
+ return NewAppError("Config.IsValid", "model.config.is_valid.message_export.export_from.app_error", nil, "", http.StatusBadRequest)
+ } else if mes.DailyRunTime == nil {
+ return NewAppError("Config.IsValid", "model.config.is_valid.message_export.daily_runtime.app_error", nil, "", http.StatusBadRequest)
+ } else if _, err := time.Parse("15:04", *mes.DailyRunTime); err != nil {
+ return NewAppError("Config.IsValid", "model.config.is_valid.message_export.daily_runtime.app_error", nil, err.Error(), http.StatusBadRequest)
+ } else if mes.BatchSize == nil || *mes.BatchSize < 0 {
+ return NewAppError("Config.IsValid", "model.config.is_valid.message_export.batch_size.app_error", nil, "", http.StatusBadRequest)
+ } else if mes.ExportFormat == nil || (*mes.ExportFormat != COMPLIANCE_EXPORT_TYPE_ACTIANCE && *mes.ExportFormat != COMPLIANCE_EXPORT_TYPE_GLOBALRELAY) {
+ return NewAppError("Config.IsValid", "model.config.is_valid.message_export.export_type.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if *mes.ExportFormat == COMPLIANCE_EXPORT_TYPE_GLOBALRELAY {
+ // validating email addresses is hard - just make sure it contains an '@' sign
+ // see https://stackoverflow.com/questions/201323/using-a-regular-expression-to-validate-an-email-address
+ if mes.GlobalRelayEmailAddress == nil || !strings.Contains(*mes.GlobalRelayEmailAddress, "@") {
+ return NewAppError("Config.IsValid", "model.config.is_valid.message_export.global_relay_email_address.app_error", nil, "", http.StatusBadRequest)
+ }
+ }
+ }
+ return nil
+}
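+
+// Illustrative value (editor's sketch, values hypothetical, assuming the model
+// package's NewBool/NewInt/NewInt64/NewString helpers) that satisfies the checks
+// above with the GlobalRelay export format:
+//
+//   mes := MessageExportSettings{
+//       EnableExport:            NewBool(true),
+//       ExportFromTimestamp:     NewInt64(0),
+//       DailyRunTime:            NewString("01:00"),
+//       BatchSize:               NewInt(10000),
+//       ExportFormat:            NewString(COMPLIANCE_EXPORT_TYPE_GLOBALRELAY),
+//       GlobalRelayEmailAddress: NewString("compliance@example.com"),
+//   }
+//   appErr := mes.isValid(fileSettings) // fileSettings: a hypothetical FileSettings value; appErr == nil here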
+
+func (o *Config) GetSanitizeOptions() map[string]bool {
+ options := map[string]bool{}
+ options["fullname"] = o.PrivacySettings.ShowFullName
+ options["email"] = o.PrivacySettings.ShowEmailAddress
+
+ return options
+}
+
+func (o *Config) Sanitize() {
+ if o.LdapSettings.BindPassword != nil && len(*o.LdapSettings.BindPassword) > 0 {
+ *o.LdapSettings.BindPassword = FAKE_SETTING
+ }
+
+ *o.FileSettings.PublicLinkSalt = FAKE_SETTING
+ if len(o.FileSettings.AmazonS3SecretAccessKey) > 0 {
+ o.FileSettings.AmazonS3SecretAccessKey = FAKE_SETTING
+ }
+
+ o.EmailSettings.InviteSalt = FAKE_SETTING
+ if len(o.EmailSettings.SMTPPassword) > 0 {
+ o.EmailSettings.SMTPPassword = FAKE_SETTING
+ }
+
+ if len(o.GitLabSettings.Secret) > 0 {
+ o.GitLabSettings.Secret = FAKE_SETTING
+ }
+
+ *o.SqlSettings.DataSource = FAKE_SETTING
+ o.SqlSettings.AtRestEncryptKey = FAKE_SETTING
+
+ for i := range o.SqlSettings.DataSourceReplicas {
+ o.SqlSettings.DataSourceReplicas[i] = FAKE_SETTING
+ }
+
+ for i := range o.SqlSettings.DataSourceSearchReplicas {
+ o.SqlSettings.DataSourceSearchReplicas[i] = FAKE_SETTING
+ }
+
+ *o.ElasticsearchSettings.Password = FAKE_SETTING
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/data_retention_policy.go b/vendor/github.com/mattermost/mattermost-server/model/data_retention_policy.go
new file mode 100644
index 00000000..dbb13374
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/data_retention_policy.go
@@ -0,0 +1,27 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type DataRetentionPolicy struct {
+ MessageDeletionEnabled bool `json:"message_deletion_enabled"`
+ FileDeletionEnabled bool `json:"file_deletion_enabled"`
+ MessageRetentionCutoff int64 `json:"message_retention_cutoff"`
+ FileRetentionCutoff int64 `json:"file_retention_cutoff"`
+}
+
+func (me *DataRetentionPolicy) ToJson() string {
+ b, _ := json.Marshal(me)
+ return string(b)
+}
+
+func DataRetentionPolicyFromJson(data io.Reader) *DataRetentionPolicy {
+ var me *DataRetentionPolicy
+ json.NewDecoder(data).Decode(&me)
+ return me
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/emoji.go b/vendor/github.com/mattermost/mattermost-server/model/emoji.go
new file mode 100644
index 00000000..a1703abb
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/emoji.go
@@ -0,0 +1,83 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+)
+
+const (
+ EMOJI_NAME_MAX_LENGTH = 64
+ EMOJI_SORT_BY_NAME = "name"
+)
+
+type Emoji struct {
+ Id string `json:"id"`
+ CreateAt int64 `json:"create_at"`
+ UpdateAt int64 `json:"update_at"`
+ DeleteAt int64 `json:"delete_at"`
+ CreatorId string `json:"creator_id"`
+ Name string `json:"name"`
+}
+
+func (emoji *Emoji) IsValid() *AppError {
+ if len(emoji.Id) != 26 {
+ return NewAppError("Emoji.IsValid", "model.emoji.id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if emoji.CreateAt == 0 {
+ return NewAppError("Emoji.IsValid", "model.emoji.create_at.app_error", nil, "id="+emoji.Id, http.StatusBadRequest)
+ }
+
+ if emoji.UpdateAt == 0 {
+ return NewAppError("Emoji.IsValid", "model.emoji.update_at.app_error", nil, "id="+emoji.Id, http.StatusBadRequest)
+ }
+
+ if len(emoji.CreatorId) != 26 {
+ return NewAppError("Emoji.IsValid", "model.emoji.user_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(emoji.Name) == 0 || len(emoji.Name) > EMOJI_NAME_MAX_LENGTH || !IsValidAlphaNumHyphenUnderscore(emoji.Name, false) {
+ return NewAppError("Emoji.IsValid", "model.emoji.name.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (emoji *Emoji) PreSave() {
+ if emoji.Id == "" {
+ emoji.Id = NewId()
+ }
+
+ emoji.CreateAt = GetMillis()
+ emoji.UpdateAt = emoji.CreateAt
+}
+
+func (emoji *Emoji) PreUpdate() {
+ emoji.UpdateAt = GetMillis()
+}
+
+func (emoji *Emoji) ToJson() string {
+ b, _ := json.Marshal(emoji)
+ return string(b)
+}
+
+func EmojiFromJson(data io.Reader) *Emoji {
+ var emoji *Emoji
+ json.NewDecoder(data).Decode(&emoji)
+ return emoji
+}
+
+func EmojiListToJson(emojiList []*Emoji) string {
+ b, _ := json.Marshal(emojiList)
+ return string(b)
+}
+
+func EmojiListFromJson(data io.Reader) []*Emoji {
+ var emojiList []*Emoji
+ json.NewDecoder(data).Decode(&emojiList)
+ return emojiList
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/emoji_search.go b/vendor/github.com/mattermost/mattermost-server/model/emoji_search.go
new file mode 100644
index 00000000..3a768a57
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/emoji_search.go
@@ -0,0 +1,25 @@
+// Copyright (c) 2018-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type EmojiSearch struct {
+ Term string `json:"term"`
+ PrefixOnly bool `json:"prefix_only"`
+}
+
+func (es *EmojiSearch) ToJson() string {
+ b, _ := json.Marshal(es)
+ return string(b)
+}
+
+func EmojiSearchFromJson(data io.Reader) *EmojiSearch {
+ var es *EmojiSearch
+ json.NewDecoder(data).Decode(&es)
+ return es
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/file.go b/vendor/github.com/mattermost/mattermost-server/model/file.go
new file mode 100644
index 00000000..c7ffbf0b
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/file.go
@@ -0,0 +1,34 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+const (
+ MaxImageSize = 6048 * 4032 // 24 megapixels (6048 x 4032 = 24,385,536 px), roughly 36MB as a raw image at ~1.5 bytes per pixel
+)
+
+var (
+ IMAGE_EXTENSIONS = [5]string{".jpg", ".jpeg", ".gif", ".bmp", ".png"}
+ IMAGE_MIME_TYPES = map[string]string{".jpg": "image/jpeg", ".jpeg": "image/jpeg", ".gif": "image/gif", ".bmp": "image/bmp", ".png": "image/png", ".tiff": "image/tiff"}
+)
+
+type FileUploadResponse struct {
+ FileInfos []*FileInfo `json:"file_infos"`
+ ClientIds []string `json:"client_ids"`
+}
+
+func FileUploadResponseFromJson(data io.Reader) *FileUploadResponse {
+ var o *FileUploadResponse
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func (o *FileUploadResponse) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/file_info.go b/vendor/github.com/mattermost/mattermost-server/model/file_info.go
new file mode 100644
index 00000000..e0bbfcfc
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/file_info.go
@@ -0,0 +1,170 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "bytes"
+ "encoding/json"
+ "image"
+ "image/gif"
+ "io"
+ "mime"
+ "net/http"
+ "path/filepath"
+ "strings"
+)
+
+type FileInfo struct {
+ Id string `json:"id"`
+ CreatorId string `json:"user_id"`
+ PostId string `json:"post_id,omitempty"`
+ CreateAt int64 `json:"create_at"`
+ UpdateAt int64 `json:"update_at"`
+ DeleteAt int64 `json:"delete_at"`
+ Path string `json:"-"` // not sent back to the client
+ ThumbnailPath string `json:"-"` // not sent back to the client
+ PreviewPath string `json:"-"` // not sent back to the client
+ Name string `json:"name"`
+ Extension string `json:"extension"`
+ Size int64 `json:"size"`
+ MimeType string `json:"mime_type"`
+ Width int `json:"width,omitempty"`
+ Height int `json:"height,omitempty"`
+ HasPreviewImage bool `json:"has_preview_image,omitempty"`
+}
+
+func (info *FileInfo) ToJson() string {
+ b, _ := json.Marshal(info)
+ return string(b)
+}
+
+func FileInfoFromJson(data io.Reader) *FileInfo {
+ decoder := json.NewDecoder(data)
+
+ var info FileInfo
+ if err := decoder.Decode(&info); err != nil {
+ return nil
+ } else {
+ return &info
+ }
+}
+
+func FileInfosToJson(infos []*FileInfo) string {
+ b, _ := json.Marshal(infos)
+ return string(b)
+}
+
+func FileInfosFromJson(data io.Reader) []*FileInfo {
+ decoder := json.NewDecoder(data)
+
+ var infos []*FileInfo
+ if err := decoder.Decode(&infos); err != nil {
+ return nil
+ } else {
+ return infos
+ }
+}
+
+func (o *FileInfo) PreSave() {
+ if o.Id == "" {
+ o.Id = NewId()
+ }
+
+ if o.CreateAt == 0 {
+ o.CreateAt = GetMillis()
+ }
+
+ if o.UpdateAt < o.CreateAt {
+ o.UpdateAt = o.CreateAt
+ }
+}
+
+func (o *FileInfo) IsValid() *AppError {
+ if len(o.Id) != 26 {
+ return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.CreatorId) != 26 {
+ return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.user_id.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if len(o.PostId) != 0 && len(o.PostId) != 26 {
+ return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.post_id.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if o.CreateAt == 0 {
+ return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if o.UpdateAt == 0 {
+ return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if o.Path == "" {
+ return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.path.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (o *FileInfo) IsImage() bool {
+ return strings.HasPrefix(o.MimeType, "image")
+}
+
+func GetInfoForBytes(name string, data []byte) (*FileInfo, *AppError) {
+ info := &FileInfo{
+ Name: name,
+ Size: int64(len(data)),
+ }
+ var err *AppError
+
+ extension := strings.ToLower(filepath.Ext(name))
+ info.MimeType = mime.TypeByExtension(extension)
+
+ if extension != "" && extension[0] == '.' {
+ // The client expects a file extension without the leading period
+ info.Extension = extension[1:]
+ } else {
+ info.Extension = extension
+ }
+
+ if info.IsImage() {
+ // Only set the width and height if it's actually an image that we can understand
+ if config, _, decodeErr := image.DecodeConfig(bytes.NewReader(data)); decodeErr == nil {
+ info.Width = config.Width
+ info.Height = config.Height
+
+ if info.MimeType == "image/gif" {
+ // Just show the gif itself instead of a preview image for animated gifs
+ if gifConfig, gifErr := gif.DecodeAll(bytes.NewReader(data)); gifErr != nil {
+ // Still return the rest of the info even though it doesn't appear to be an actual gif;
+ // assigning to the outer err lets the caller see the parse failure
+ info.HasPreviewImage = true
+ err = NewAppError("GetInfoForBytes", "model.file_info.get.gif.app_error", nil, "name="+name, http.StatusBadRequest)
+ } else {
+ info.HasPreviewImage = len(gifConfig.Image) == 1
+ }
+ } else {
+ info.HasPreviewImage = true
+ }
+ }
+ }
+
+ return info, err
+}
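+
+// Illustrative usage (editor's sketch, not part of the upstream file): given the
+// raw bytes of an upload, GetInfoForBytes fills in size, mime type and, for
+// images, dimensions; the returned *AppError reports gifs that could not be parsed.
+//
+//   data, _ := ioutil.ReadFile("logo.png") // hypothetical file
+//   info, appErr := GetInfoForBytes("logo.png", data)
+//   if appErr == nil && info.IsImage() {
+//       fmt.Printf("%dx%d %s\n", info.Width, info.Height, info.MimeType)
+//   }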
+
+func GetEtagForFileInfos(infos []*FileInfo) string {
+ if len(infos) == 0 {
+ return Etag()
+ }
+
+ var maxUpdateAt int64
+
+ for _, info := range infos {
+ if info.UpdateAt > maxUpdateAt {
+ maxUpdateAt = info.UpdateAt
+ }
+ }
+
+ return Etag(infos[0].PostId, maxUpdateAt)
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/gitlab.go b/vendor/github.com/mattermost/mattermost-server/model/gitlab.go
new file mode 100644
index 00000000..8777614c
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/gitlab.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+const (
+ USER_AUTH_SERVICE_GITLAB = "gitlab"
+)
diff --git a/vendor/github.com/mattermost/mattermost-server/model/gitlab/gitlab.go b/vendor/github.com/mattermost/mattermost-server/model/gitlab/gitlab.go
new file mode 100644
index 00000000..7e0cb10a
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/gitlab/gitlab.go
@@ -0,0 +1,114 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package oauthgitlab
+
+import (
+ "encoding/json"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/mattermost/mattermost-server/einterfaces"
+ "github.com/mattermost/mattermost-server/model"
+)
+
+type GitLabProvider struct {
+}
+
+type GitLabUser struct {
+ Id int64 `json:"id"`
+ Username string `json:"username"`
+ Login string `json:"login"`
+ Email string `json:"email"`
+ Name string `json:"name"`
+}
+
+func init() {
+ provider := &GitLabProvider{}
+ einterfaces.RegisterOauthProvider(model.USER_AUTH_SERVICE_GITLAB, provider)
+}
+
+func userFromGitLabUser(glu *GitLabUser) *model.User {
+ user := &model.User{}
+ username := glu.Username
+ if username == "" {
+ username = glu.Login
+ }
+ user.Username = model.CleanUsername(username)
+ splitName := strings.Split(glu.Name, " ")
+ if len(splitName) == 2 {
+ user.FirstName = splitName[0]
+ user.LastName = splitName[1]
+ } else if len(splitName) >= 2 {
+ user.FirstName = splitName[0]
+ user.LastName = strings.Join(splitName[1:], " ")
+ } else {
+ user.FirstName = glu.Name
+ }
+ user.Email = glu.Email
+ userId := strconv.FormatInt(glu.Id, 10)
+ user.AuthData = &userId
+ user.AuthService = model.USER_AUTH_SERVICE_GITLAB
+
+ return user
+}
+
+func gitLabUserFromJson(data io.Reader) *GitLabUser {
+ decoder := json.NewDecoder(data)
+ var glu GitLabUser
+ err := decoder.Decode(&glu)
+ if err == nil {
+ return &glu
+ } else {
+ return nil
+ }
+}
+
+func (glu *GitLabUser) ToJson() string {
+ b, err := json.Marshal(glu)
+ if err != nil {
+ return ""
+ } else {
+ return string(b)
+ }
+}
+
+func (glu *GitLabUser) IsValid() bool {
+ if glu.Id == 0 {
+ return false
+ }
+
+ if len(glu.Email) == 0 {
+ return false
+ }
+
+ return true
+}
+
+func (glu *GitLabUser) getAuthData() string {
+ return strconv.FormatInt(glu.Id, 10)
+}
+
+func (m *GitLabProvider) GetIdentifier() string {
+ return model.USER_AUTH_SERVICE_GITLAB
+}
+
+func (m *GitLabProvider) GetUserFromJson(data io.Reader) *model.User {
+ glu := gitLabUserFromJson(data)
+ if glu != nil && glu.IsValid() {
+ return userFromGitLabUser(glu)
+ }
+
+ return &model.User{}
+}
+
+func (m *GitLabProvider) GetAuthDataFromJson(data io.Reader) string {
+ glu := gitLabUserFromJson(data)
+
+ if glu != nil && glu.IsValid() {
+ return glu.getAuthData()
+ }
+
+ return ""
+}
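+
+// Editor's note (illustrative): the provider registers itself in init(), so other
+// packages only need to import it for its side effect:
+//
+//   import _ "github.com/mattermost/mattermost-server/model/gitlab"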
diff --git a/vendor/github.com/mattermost/mattermost-server/model/incoming_webhook.go b/vendor/github.com/mattermost/mattermost-server/model/incoming_webhook.go
new file mode 100644
index 00000000..b38cfeec
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/incoming_webhook.go
@@ -0,0 +1,206 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "net/http"
+ "regexp"
+)
+
+const (
+ DEFAULT_WEBHOOK_USERNAME = "webhook"
+)
+
+type IncomingWebhook struct {
+ Id string `json:"id"`
+ CreateAt int64 `json:"create_at"`
+ UpdateAt int64 `json:"update_at"`
+ DeleteAt int64 `json:"delete_at"`
+ UserId string `json:"user_id"`
+ ChannelId string `json:"channel_id"`
+ TeamId string `json:"team_id"`
+ DisplayName string `json:"display_name"`
+ Description string `json:"description"`
+ Username string `json:"username"`
+ IconURL string `json:"icon_url"`
+}
+
+type IncomingWebhookRequest struct {
+ Text string `json:"text"`
+ Username string `json:"username"`
+ IconURL string `json:"icon_url"`
+ ChannelName string `json:"channel"`
+ Props StringInterface `json:"props"`
+ Attachments []*SlackAttachment `json:"attachments"`
+ Type string `json:"type"`
+}
+
+func (o *IncomingWebhook) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func IncomingWebhookFromJson(data io.Reader) *IncomingWebhook {
+ var o *IncomingWebhook
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func IncomingWebhookListToJson(l []*IncomingWebhook) string {
+ b, _ := json.Marshal(l)
+ return string(b)
+}
+
+func IncomingWebhookListFromJson(data io.Reader) []*IncomingWebhook {
+ var o []*IncomingWebhook
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func (o *IncomingWebhook) IsValid() *AppError {
+
+ if len(o.Id) != 26 {
+ return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.id.app_error", nil, "", http.StatusBadRequest)
+
+ }
+
+ if o.CreateAt == 0 {
+ return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if o.UpdateAt == 0 {
+ return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if len(o.UserId) != 26 {
+ return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.user_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.ChannelId) != 26 {
+ return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.channel_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.TeamId) != 26 {
+ return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.team_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.DisplayName) > 64 {
+ return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.display_name.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.Description) > 128 {
+ return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.description.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.Username) > 64 {
+ return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.username.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.IconURL) > 1024 {
+ return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.icon_url.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (o *IncomingWebhook) PreSave() {
+ if o.Id == "" {
+ o.Id = NewId()
+ }
+
+ o.CreateAt = GetMillis()
+ o.UpdateAt = o.CreateAt
+}
+
+func (o *IncomingWebhook) PreUpdate() {
+ o.UpdateAt = GetMillis()
+}
+
+// escapeControlCharsFromPayload escapes control chars (\n, \t) from a byte slice.
+// Context:
+// JSON strings are not supposed to contain control characters such as \n, \t,
+// ... but some incoming webhooks might still send invalid JSON and we want to
+// try to handle that. An example invalid JSON string from an incoming webhook
+// might look like this (strings for both "text" and "fallback" attributes are
+// invalid JSON strings because they contain unescaped newlines and tabs):
+// `{
+// "text": "this is a test
+// that contains a newline and tabs",
+// "attachments": [
+// {
+// "fallback": "Required plain-text summary of the attachment
+// that contains a newline and tabs",
+// "color": "#36a64f",
+// ...
+// "text": "Optional text that appears within the attachment
+// that contains a newline and tabs",
+// ...
+// "thumb_url": "http://example.com/path/to/thumb.png"
+// }
+// ]
+// }`
+// This function will search for `"key": "value"` pairs, and escape \n, \t
+// from the value.
+func escapeControlCharsFromPayload(by []byte) []byte {
+ // we'll search for `"text": "..."` or `"fallback": "..."`, ...
+ keys := "text|fallback|pretext|author_name|title|value"
+
+ // the regexp reads like this:
+ // (?s): this flag lets . match \n (default is false)
+ // "(keys)": we search for the keys defined above
+ // \s*:\s*: followed by 0..n spaces/tabs, a colon then 0..n spaces/tabs
+ // ": a double-quote
+ // (\\"|[^"])*: any number of times the `\"` string or any char but a double-quote
+ // ": a double-quote
+ r := `(?s)"(` + keys + `)"\s*:\s*"(\\"|[^"])*"`
+ re := regexp.MustCompile(r)
+
+ // the function that will escape \n and \t on the regexp matches
+ repl := func(b []byte) []byte {
+ if bytes.Contains(b, []byte("\n")) {
+ b = bytes.Replace(b, []byte("\n"), []byte("\\n"), -1)
+ }
+ if bytes.Contains(b, []byte("\t")) {
+ b = bytes.Replace(b, []byte("\t"), []byte("\\t"), -1)
+ }
+
+ return b
+ }
+
+ return re.ReplaceAllFunc(by, repl)
+}
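+
+// Illustrative example (editor's sketch, not part of the upstream file): a raw
+// newline inside a matched value becomes the two-character sequence \n, which
+// makes the payload decodable again:
+//
+//   in := []byte("{\"text\": \"line one\nline two\"}")
+//   out := escapeControlCharsFromPayload(in)
+//   // string(out) == "{\"text\": \"line one\\nline two\"}"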
+
+func decodeIncomingWebhookRequest(by []byte) (*IncomingWebhookRequest, error) {
+ decoder := json.NewDecoder(bytes.NewReader(by))
+ var o IncomingWebhookRequest
+ err := decoder.Decode(&o)
+ if err == nil {
+ return &o, nil
+ } else {
+ return nil, err
+ }
+}
+
+func IncomingWebhookRequestFromJson(data io.Reader) (*IncomingWebhookRequest, *AppError) {
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(data)
+ by := buf.Bytes()
+
+ // Try to decode the JSON data. Only if it fails, try to escape control
+ // characters from the strings contained in the JSON data.
+ o, err := decodeIncomingWebhookRequest(by)
+ if err != nil {
+ o, err = decodeIncomingWebhookRequest(escapeControlCharsFromPayload(by))
+ if err != nil {
+ return nil, NewAppError("IncomingWebhookRequestFromJson", "Unable to parse incoming data", nil, err.Error(), http.StatusBadRequest)
+ }
+ }
+
+ o.Attachments = StringifySlackFieldValue(o.Attachments)
+
+ return o, nil
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/initial_load.go b/vendor/github.com/mattermost/mattermost-server/model/initial_load.go
new file mode 100644
index 00000000..3be68044
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/initial_load.go
@@ -0,0 +1,30 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type InitialLoad struct {
+ User *User `json:"user"`
+ TeamMembers []*TeamMember `json:"team_members"`
+ Teams []*Team `json:"teams"`
+ Preferences Preferences `json:"preferences"`
+ ClientCfg map[string]string `json:"client_cfg"`
+ LicenseCfg map[string]string `json:"license_cfg"`
+ NoAccounts bool `json:"no_accounts"`
+}
+
+func (me *InitialLoad) ToJson() string {
+ b, _ := json.Marshal(me)
+ return string(b)
+}
+
+func InitialLoadFromJson(data io.Reader) *InitialLoad {
+ var o *InitialLoad
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/job.go b/vendor/github.com/mattermost/mattermost-server/model/job.go
new file mode 100644
index 00000000..e10ed1f5
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/job.go
@@ -0,0 +1,118 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "time"
+)
+
+const (
+ JOB_TYPE_DATA_RETENTION = "data_retention"
+ JOB_TYPE_MESSAGE_EXPORT = "message_export"
+ JOB_TYPE_ELASTICSEARCH_POST_INDEXING = "elasticsearch_post_indexing"
+ JOB_TYPE_ELASTICSEARCH_POST_AGGREGATION = "elasticsearch_post_aggregation"
+ JOB_TYPE_LDAP_SYNC = "ldap_sync"
+
+ JOB_STATUS_PENDING = "pending"
+ JOB_STATUS_IN_PROGRESS = "in_progress"
+ JOB_STATUS_SUCCESS = "success"
+ JOB_STATUS_ERROR = "error"
+ JOB_STATUS_CANCEL_REQUESTED = "cancel_requested"
+ JOB_STATUS_CANCELED = "canceled"
+)
+
+type Job struct {
+ Id string `json:"id"`
+ Type string `json:"type"`
+ Priority int64 `json:"priority"`
+ CreateAt int64 `json:"create_at"`
+ StartAt int64 `json:"start_at"`
+ LastActivityAt int64 `json:"last_activity_at"`
+ Status string `json:"status"`
+ Progress int64 `json:"progress"`
+ Data map[string]string `json:"data"`
+}
+
+func (j *Job) IsValid() *AppError {
+ if len(j.Id) != 26 {
+ return NewAppError("Job.IsValid", "model.job.is_valid.id.app_error", nil, "id="+j.Id, http.StatusBadRequest)
+ }
+
+ if j.CreateAt == 0 {
+ return NewAppError("Job.IsValid", "model.job.is_valid.create_at.app_error", nil, "id="+j.Id, http.StatusBadRequest)
+ }
+
+ switch j.Type {
+ case JOB_TYPE_DATA_RETENTION:
+ case JOB_TYPE_ELASTICSEARCH_POST_INDEXING:
+ case JOB_TYPE_ELASTICSEARCH_POST_AGGREGATION:
+ case JOB_TYPE_LDAP_SYNC:
+ case JOB_TYPE_MESSAGE_EXPORT:
+ default:
+ return NewAppError("Job.IsValid", "model.job.is_valid.type.app_error", nil, "id="+j.Id, http.StatusBadRequest)
+ }
+
+ switch j.Status {
+ case JOB_STATUS_PENDING:
+ case JOB_STATUS_IN_PROGRESS:
+ case JOB_STATUS_SUCCESS:
+ case JOB_STATUS_ERROR:
+ case JOB_STATUS_CANCEL_REQUESTED:
+ case JOB_STATUS_CANCELED:
+ default:
+ return NewAppError("Job.IsValid", "model.job.is_valid.status.app_error", nil, "id="+j.Id, http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (js *Job) ToJson() string {
+ b, _ := json.Marshal(js)
+ return string(b)
+}
+
+func JobFromJson(data io.Reader) *Job {
+ var job Job
+ if err := json.NewDecoder(data).Decode(&job); err == nil {
+ return &job
+ } else {
+ return nil
+ }
+}
+
+func JobsToJson(jobs []*Job) string {
+ b, _ := json.Marshal(jobs)
+ return string(b)
+}
+
+func JobsFromJson(data io.Reader) []*Job {
+ var jobs []*Job
+ if err := json.NewDecoder(data).Decode(&jobs); err == nil {
+ return jobs
+ } else {
+ return nil
+ }
+}
+
+func (js *Job) DataToJson() string {
+ b, _ := json.Marshal(js.Data)
+ return string(b)
+}
+
+type Worker interface {
+ Run()
+ Stop()
+ JobChannel() chan<- Job
+}
+
+type Scheduler interface {
+ Name() string
+ JobType() string
+ Enabled(cfg *Config) bool
+ NextScheduleTime(cfg *Config, now time.Time, pendingJobs bool, lastSuccessfulJob *Job) *time.Time
+ ScheduleJob(cfg *Config, pendingJobs bool, lastSuccessfulJob *Job) (*Job, *AppError)
+}
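+
+// Editor's sketch (illustrative, not upstream code): a minimal Scheduler
+// implementation satisfying the interface above might look like this; the jobs
+// layer would call NextScheduleTime to decide when to enqueue the next Job of
+// the matching JobType.
+//
+//   type nightlyScheduler struct{}
+//
+//   func (s *nightlyScheduler) Name() string             { return "NightlyScheduler" }
+//   func (s *nightlyScheduler) JobType() string          { return JOB_TYPE_DATA_RETENTION }
+//   func (s *nightlyScheduler) Enabled(cfg *Config) bool { return true } // always on in this sketch
+//   func (s *nightlyScheduler) NextScheduleTime(cfg *Config, now time.Time, pendingJobs bool, last *Job) *time.Time {
+//       next := now.Add(24 * time.Hour)
+//       return &next
+//   }
+//   func (s *nightlyScheduler) ScheduleJob(cfg *Config, pendingJobs bool, last *Job) (*Job, *AppError) {
+//       return &Job{Type: JOB_TYPE_DATA_RETENTION, Status: JOB_STATUS_PENDING}, nil
+//   }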
diff --git a/vendor/github.com/mattermost/mattermost-server/model/ldap.go b/vendor/github.com/mattermost/mattermost-server/model/ldap.go
new file mode 100644
index 00000000..1453a4ad
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/ldap.go
@@ -0,0 +1,9 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+const (
+ USER_AUTH_SERVICE_LDAP = "ldap"
+ LDAP_SYNC_TASK_NAME = "LDAP Synchronization"
+)
diff --git a/vendor/github.com/mattermost/mattermost-server/model/license.go b/vendor/github.com/mattermost/mattermost-server/model/license.go
new file mode 100644
index 00000000..942a18d5
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/license.go
@@ -0,0 +1,219 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+)
+
+const (
+ EXPIRED_LICENSE_ERROR = "api.license.add_license.expired.app_error"
+ INVALID_LICENSE_ERROR = "api.license.add_license.invalid.app_error"
+)
+
+type LicenseRecord struct {
+ Id string `json:"id"`
+ CreateAt int64 `json:"create_at"`
+ Bytes string `json:"-"`
+}
+
+type License struct {
+ Id string `json:"id"`
+ IssuedAt int64 `json:"issued_at"`
+ StartsAt int64 `json:"starts_at"`
+ ExpiresAt int64 `json:"expires_at"`
+ Customer *Customer `json:"customer"`
+ Features *Features `json:"features"`
+}
+
+type Customer struct {
+ Id string `json:"id"`
+ Name string `json:"name"`
+ Email string `json:"email"`
+ Company string `json:"company"`
+ PhoneNumber string `json:"phone_number"`
+}
+
+type Features struct {
+ Users *int `json:"users"`
+ LDAP *bool `json:"ldap"`
+ MFA *bool `json:"mfa"`
+ GoogleOAuth *bool `json:"google_oauth"`
+ Office365OAuth *bool `json:"office365_oauth"`
+ Compliance *bool `json:"compliance"`
+ Cluster *bool `json:"cluster"`
+ Metrics *bool `json:"metrics"`
+ CustomBrand *bool `json:"custom_brand"`
+ MHPNS *bool `json:"mhpns"`
+ SAML *bool `json:"saml"`
+ PasswordRequirements *bool `json:"password_requirements"`
+ Elasticsearch *bool `json:"elastic_search"`
+ Announcement *bool `json:"announcement"`
+ ThemeManagement *bool `json:"theme_management"`
+ EmailNotificationContents *bool `json:"email_notification_contents"`
+ DataRetention *bool `json:"data_retention"`
+ MessageExport *bool `json:"message_export"`
+
+ // once we enable more features for webrtc we'll need to control them with this
+ FutureFeatures *bool `json:"future_features"`
+}
+
+func (f *Features) ToMap() map[string]interface{} {
+ return map[string]interface{}{
+ "ldap": *f.LDAP,
+ "mfa": *f.MFA,
+ "google": *f.GoogleOAuth,
+ "office365": *f.Office365OAuth,
+ "compliance": *f.Compliance,
+ "cluster": *f.Cluster,
+ "metrics": *f.Metrics,
+ "custom_brand": *f.CustomBrand,
+ "mhpns": *f.MHPNS,
+ "saml": *f.SAML,
+ "password": *f.PasswordRequirements,
+ "elastic_search": *f.Elasticsearch,
+ "email_notification_contents": *f.EmailNotificationContents,
+ "data_retention": *f.DataRetention,
+ "message_export": *f.MessageExport,
+ "future": *f.FutureFeatures,
+ }
+}
+
+func (f *Features) SetDefaults() {
+ if f.FutureFeatures == nil {
+ f.FutureFeatures = NewBool(true)
+ }
+
+ if f.Users == nil {
+ f.Users = NewInt(0)
+ }
+
+ if f.LDAP == nil {
+ f.LDAP = NewBool(*f.FutureFeatures)
+ }
+
+ if f.MFA == nil {
+ f.MFA = NewBool(*f.FutureFeatures)
+ }
+
+ if f.GoogleOAuth == nil {
+ f.GoogleOAuth = NewBool(*f.FutureFeatures)
+ }
+
+ if f.Office365OAuth == nil {
+ f.Office365OAuth = NewBool(*f.FutureFeatures)
+ }
+
+ if f.Compliance == nil {
+ f.Compliance = NewBool(*f.FutureFeatures)
+ }
+
+ if f.Cluster == nil {
+ f.Cluster = NewBool(*f.FutureFeatures)
+ }
+
+ if f.Metrics == nil {
+ f.Metrics = NewBool(*f.FutureFeatures)
+ }
+
+ if f.CustomBrand == nil {
+ f.CustomBrand = NewBool(*f.FutureFeatures)
+ }
+
+ if f.MHPNS == nil {
+ f.MHPNS = NewBool(*f.FutureFeatures)
+ }
+
+ if f.SAML == nil {
+ f.SAML = NewBool(*f.FutureFeatures)
+ }
+
+ if f.PasswordRequirements == nil {
+ f.PasswordRequirements = NewBool(*f.FutureFeatures)
+ }
+
+ if f.Elasticsearch == nil {
+ f.Elasticsearch = NewBool(*f.FutureFeatures)
+ }
+
+ if f.Announcement == nil {
+ f.Announcement = NewBool(true)
+ }
+
+ if f.ThemeManagement == nil {
+ f.ThemeManagement = NewBool(true)
+ }
+
+ if f.EmailNotificationContents == nil {
+ f.EmailNotificationContents = NewBool(*f.FutureFeatures)
+ }
+
+ if f.DataRetention == nil {
+ f.DataRetention = NewBool(*f.FutureFeatures)
+ }
+
+ if f.MessageExport == nil {
+ f.MessageExport = NewBool(*f.FutureFeatures)
+ }
+}
+
+func (l *License) IsExpired() bool {
+ return l.ExpiresAt < GetMillis()
+}
+
+func (l *License) IsStarted() bool {
+ return l.StartsAt < GetMillis()
+}
+
+func (l *License) ToJson() string {
+ b, _ := json.Marshal(l)
+ return string(b)
+}
+
+// NewTestLicense returns a license that expires in the future and has the given features.
+func NewTestLicense(features ...string) *License {
+ ret := &License{
+ ExpiresAt: GetMillis() + 90*24*60*60*1000,
+ Customer: &Customer{},
+ Features: &Features{},
+ }
+ ret.Features.SetDefaults()
+
+ featureMap := map[string]bool{}
+ for _, feature := range features {
+ featureMap[feature] = true
+ }
+ featureJson, _ := json.Marshal(featureMap)
+ json.Unmarshal(featureJson, &ret.Features)
+
+ return ret
+}
+
+func LicenseFromJson(data io.Reader) *License {
+ var o *License
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func (lr *LicenseRecord) IsValid() *AppError {
+ if len(lr.Id) != 26 {
+ return NewAppError("LicenseRecord.IsValid", "model.license_record.is_valid.id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if lr.CreateAt == 0 {
+ return NewAppError("LicenseRecord.IsValid", "model.license_record.is_valid.create_at.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(lr.Bytes) == 0 || len(lr.Bytes) > 10000 {
+ return NewAppError("LicenseRecord.IsValid", "model.license_record.is_valid.create_at.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (lr *LicenseRecord) PreSave() {
+ lr.CreateAt = GetMillis()
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/manifest.go b/vendor/github.com/mattermost/mattermost-server/model/manifest.go
new file mode 100644
index 00000000..5ba4854b
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/manifest.go
@@ -0,0 +1,228 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "gopkg.in/yaml.v2"
+)
+
+const (
+ PLUGIN_CONFIG_TYPE_TEXT = "text"
+ PLUGIN_CONFIG_TYPE_BOOL = "bool"
+ PLUGIN_CONFIG_TYPE_RADIO = "radio"
+ PLUGIN_CONFIG_TYPE_DROPDOWN = "dropdown"
+ PLUGIN_CONFIG_TYPE_GENERATED = "generated"
+ PLUGIN_CONFIG_TYPE_USERNAME = "username"
+)
+
+type PluginOption struct {
+ // The display name for the option.
+ DisplayName string `json:"display_name" yaml:"display_name"`
+
+ // The string value for the option.
+ Value string `json:"value" yaml:"value"`
+}
+
+type PluginSetting struct {
+ // The key that the setting will be assigned to in the configuration file.
+ Key string `json:"key" yaml:"key"`
+
+ // The display name for the setting.
+ DisplayName string `json:"display_name" yaml:"display_name"`
+
+ // The type of the setting.
+ //
+ // "bool" will result in a boolean true or false setting.
+ //
+ // "dropdown" will result in a string setting that allows the user to select from a list of
+ // pre-defined options.
+ //
+ // "generated" will result in a string setting that is set to a random, cryptographically secure
+ // string.
+ //
+ // "radio" will result in a string setting that allows the user to select from a short selection
+ // of pre-defined options.
+ //
+ // "text" will result in a string setting that can be typed in manually.
+ //
+ // "username" will result in a text setting that will autocomplete to a username.
+ Type string `json:"type" yaml:"type"`
+
+ // The help text to display to the user.
+ HelpText string `json:"help_text" yaml:"help_text"`
+
+ // The help text to display alongside the "Regenerate" button for settings of the "generated" type.
+ RegenerateHelpText string `json:"regenerate_help_text,omitempty" yaml:"regenerate_help_text,omitempty"`
+
+ // The placeholder to display for "text", "generated" and "username" types when blank.
+ Placeholder string `json:"placeholder" yaml:"placeholder"`
+
+ // The default value of the setting.
+ Default interface{} `json:"default" yaml:"default"`
+
+ // For "radio" or "dropdown" settings, this is the list of pre-defined options that the user can choose
+ // from.
+ Options []*PluginOption `json:"options,omitempty" yaml:"options,omitempty"`
+}
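+
+// Illustrative settings_schema entry (editor's sketch, values hypothetical): a
+// "dropdown" setting as it might appear in plugin.yaml, using the pre-defined
+// options described above.
+//
+//   settings:
+//   - key: log_level
+//     display_name: Log Level
+//     type: dropdown
+//     default: info
+//     options:
+//     - display_name: Debug
+//       value: debug
+//     - display_name: Info
+//       value: info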
+
+type PluginSettingsSchema struct {
+ // Optional text to display above the settings.
+ Header string `json:"header" yaml:"header"`
+
+ // Optional text to display below the settings.
+ Footer string `json:"footer" yaml:"footer"`
+
+ // A list of setting definitions.
+ Settings []*PluginSetting `json:"settings" yaml:"settings"`
+}
+
+// The plugin manifest defines the metadata required to load and present your plugin. The manifest
+// file should be named plugin.json or plugin.yaml and placed at the top of your
+// plugin bundle.
+//
+// Example plugin.yaml:
+//
+// id: com.mycompany.myplugin
+// name: My Plugin
+// description: This is my plugin. It does stuff.
+// backend:
+// executable: myplugin
+// settings_schema:
+// settings:
+// - key: enable_extra_thing
+// type: bool
+// display_name: Enable Extra Thing
+// help_text: When true, an extra thing will be enabled!
+// default: false
+type Manifest struct {
+ // The id is a globally unique identifier that represents your plugin. Ids are limited
+ // to 190 characters. Reverse-DNS notation using a name you control is a good option.
+ // For example, "com.mycompany.myplugin".
+ Id string `json:"id" yaml:"id"`
+
+ // The name to be displayed for the plugin.
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+
+ // A description of what your plugin is and does.
+ Description string `json:"description,omitempty" yaml:"description,omitempty"`
+
+ // A version number for your plugin. Semantic versioning is recommended: http://semver.org
+ Version string `json:"version" yaml:"version"`
+
+ // If your plugin extends the server, you'll need to define backend.
+ Backend *ManifestBackend `json:"backend,omitempty" yaml:"backend,omitempty"`
+
+ // If your plugin extends the web app, you'll need to define webapp.
+ Webapp *ManifestWebapp `json:"webapp,omitempty" yaml:"webapp,omitempty"`
+
+ // To allow administrators to configure your plugin via the Mattermost system console, you can
+ // provide your settings schema.
+ SettingsSchema *PluginSettingsSchema `json:"settings_schema,omitempty" yaml:"settings_schema,omitempty"`
+}
+
+type ManifestBackend struct {
+ // The path to your executable binary. This should be relative to the root of your bundle and the
+ // location of the manifest file.
+ //
+ // On Windows, this file must have a ".exe" extension.
+ Executable string `json:"executable" yaml:"executable"`
+}
+
+type ManifestWebapp struct {
+ // The path to your webapp bundle. This should be relative to the root of your bundle and the
+ // location of the manifest file.
+ BundlePath string `json:"bundle_path" yaml:"bundle_path"`
+}
+
+func (m *Manifest) ToJson() string {
+ b, _ := json.Marshal(m)
+ return string(b)
+}
+
+func ManifestListToJson(m []*Manifest) string {
+ b, _ := json.Marshal(m)
+ return string(b)
+}
+
+func ManifestFromJson(data io.Reader) *Manifest {
+ var m *Manifest
+ json.NewDecoder(data).Decode(&m)
+ return m
+}
+
+func ManifestListFromJson(data io.Reader) []*Manifest {
+ var manifests []*Manifest
+ json.NewDecoder(data).Decode(&manifests)
+ return manifests
+}
+
+func (m *Manifest) HasClient() bool {
+ return m.Webapp != nil
+}
+
+func (m *Manifest) ClientManifest() *Manifest {
+ cm := new(Manifest)
+ *cm = *m
+ cm.Name = ""
+ cm.Description = ""
+ cm.Backend = nil
+ return cm
+}
+
+// FindManifest will find and parse the manifest in a given directory.
+//
+// In all cases other than a does-not-exist error, path is set to the path of the manifest file that was
+// found.
+//
+// Manifests are JSON or YAML files named plugin.json, plugin.yaml, or plugin.yml.
+func FindManifest(dir string) (manifest *Manifest, path string, err error) {
+ for _, name := range []string{"plugin.yml", "plugin.yaml"} {
+ path = filepath.Join(dir, name)
+ f, ferr := os.Open(path)
+ if ferr != nil {
+ if !os.IsNotExist(ferr) {
+ err = ferr
+ return
+ }
+ continue
+ }
+ b, ioerr := ioutil.ReadAll(f)
+ f.Close()
+ if ioerr != nil {
+ err = ioerr
+ return
+ }
+ var parsed Manifest
+ err = yaml.Unmarshal(b, &parsed)
+ if err != nil {
+ return
+ }
+ manifest = &parsed
+ return
+ }
+
+ path = filepath.Join(dir, "plugin.json")
+ f, ferr := os.Open(path)
+ if ferr != nil {
+ if os.IsNotExist(ferr) {
+ path = ""
+ }
+ err = ferr
+ return
+ }
+ defer f.Close()
+ var parsed Manifest
+ err = json.NewDecoder(f).Decode(&parsed)
+ if err != nil {
+ return
+ }
+ manifest = &parsed
+ return
+}
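+
+// Illustrative usage (editor's sketch, not part of the upstream file):
+//
+//   manifest, path, err := FindManifest("/path/to/plugin/bundle") // hypothetical dir
+//   if err != nil {
+//       // on a does-not-exist error, path is ""
+//   } else {
+//       fmt.Println(manifest.Id, "loaded from", path)
+//   }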
diff --git a/vendor/github.com/mattermost/mattermost-server/model/message_export.go b/vendor/github.com/mattermost/mattermost-server/model/message_export.go
new file mode 100644
index 00000000..22641dee
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/message_export.go
@@ -0,0 +1,19 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+type MessageExport struct {
+ ChannelId *string
+ ChannelDisplayName *string
+
+ UserId *string
+ UserEmail *string
+ Username *string
+
+ PostId *string
+ PostCreateAt *int64
+ PostMessage *string
+ PostType *string
+ PostFileIds StringArray
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/mfa_secret.go b/vendor/github.com/mattermost/mattermost-server/model/mfa_secret.go
new file mode 100644
index 00000000..23a903c8
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/mfa_secret.go
@@ -0,0 +1,25 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type MfaSecret struct {
+ Secret string `json:"secret"`
+ QRCode string `json:"qr_code"`
+}
+
+func (me *MfaSecret) ToJson() string {
+ b, _ := json.Marshal(me)
+ return string(b)
+}
+
+func MfaSecretFromJson(data io.Reader) *MfaSecret {
+ var me *MfaSecret
+ json.NewDecoder(data).Decode(&me)
+ return me
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/oauth.go b/vendor/github.com/mattermost/mattermost-server/model/oauth.go
new file mode 100644
index 00000000..70e8a3f2
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/oauth.go
@@ -0,0 +1,164 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "unicode/utf8"
+)
+
+const (
+ OAUTH_ACTION_SIGNUP = "signup"
+ OAUTH_ACTION_LOGIN = "login"
+ OAUTH_ACTION_EMAIL_TO_SSO = "email_to_sso"
+ OAUTH_ACTION_SSO_TO_EMAIL = "sso_to_email"
+ OAUTH_ACTION_MOBILE = "mobile"
+)
+
+type OAuthApp struct {
+ Id string `json:"id"`
+ CreatorId string `json:"creator_id"`
+ CreateAt int64 `json:"create_at"`
+ UpdateAt int64 `json:"update_at"`
+ ClientSecret string `json:"client_secret"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ IconURL string `json:"icon_url"`
+ CallbackUrls StringArray `json:"callback_urls"`
+ Homepage string `json:"homepage"`
+ IsTrusted bool `json:"is_trusted"`
+}
+
+// IsValid validates the app and returns an error if it isn't configured
+// correctly.
+func (a *OAuthApp) IsValid() *AppError {
+
+ if len(a.Id) != 26 {
+ return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.app_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if a.CreateAt == 0 {
+ return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.create_at.app_error", nil, "app_id="+a.Id, http.StatusBadRequest)
+ }
+
+ if a.UpdateAt == 0 {
+ return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.update_at.app_error", nil, "app_id="+a.Id, http.StatusBadRequest)
+ }
+
+ if len(a.CreatorId) != 26 {
+ return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.creator_id.app_error", nil, "app_id="+a.Id, http.StatusBadRequest)
+ }
+
+ if len(a.ClientSecret) == 0 || len(a.ClientSecret) > 128 {
+ return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.client_secret.app_error", nil, "app_id="+a.Id, http.StatusBadRequest)
+ }
+
+ if len(a.Name) == 0 || len(a.Name) > 64 {
+ return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.name.app_error", nil, "app_id="+a.Id, http.StatusBadRequest)
+ }
+
+ if len(a.CallbackUrls) == 0 || len(fmt.Sprintf("%s", a.CallbackUrls)) > 1024 {
+ return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.callback.app_error", nil, "app_id="+a.Id, http.StatusBadRequest)
+ }
+
+ for _, callback := range a.CallbackUrls {
+ if !IsValidHttpUrl(callback) {
+ return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.callback.app_error", nil, "", http.StatusBadRequest)
+ }
+ }
+
+ if len(a.Homepage) == 0 || len(a.Homepage) > 256 || !IsValidHttpUrl(a.Homepage) {
+ return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.homepage.app_error", nil, "app_id="+a.Id, http.StatusBadRequest)
+ }
+
+ if utf8.RuneCountInString(a.Description) > 512 {
+ return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.description.app_error", nil, "app_id="+a.Id, http.StatusBadRequest)
+ }
+
+ if len(a.IconURL) > 0 {
+ if len(a.IconURL) > 512 || !IsValidHttpUrl(a.IconURL) {
+ return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.icon_url.app_error", nil, "app_id="+a.Id, http.StatusBadRequest)
+ }
+ }
+
+ return nil
+}
+
+// PreSave will set the Id and ClientSecret if missing. It will also fill
+// in the CreateAt, UpdateAt times. It should be run before saving the app to the db.
+func (a *OAuthApp) PreSave() {
+ if a.Id == "" {
+ a.Id = NewId()
+ }
+
+ if a.ClientSecret == "" {
+ a.ClientSecret = NewId()
+ }
+
+ a.CreateAt = GetMillis()
+ a.UpdateAt = a.CreateAt
+}
+
+// PreUpdate should be run before updating the app in the db.
+func (a *OAuthApp) PreUpdate() {
+ a.UpdateAt = GetMillis()
+}
+
+// ToJson converts an OAuthApp to a json string
+func (a *OAuthApp) ToJson() string {
+ b, _ := json.Marshal(a)
+ return string(b)
+}
+
+// Generate a valid strong etag so the browser can cache the results
+func (a *OAuthApp) Etag() string {
+ return Etag(a.Id, a.UpdateAt)
+}
+
+// Remove any private data from the app object
+func (a *OAuthApp) Sanitize() {
+ a.ClientSecret = ""
+}
+
+func (a *OAuthApp) IsValidRedirectURL(url string) bool {
+ for _, u := range a.CallbackUrls {
+ if u == url {
+ return true
+ }
+ }
+
+ return false
+}
+
+// OAuthAppFromJson will decode the input and return an OAuthApp
+func OAuthAppFromJson(data io.Reader) *OAuthApp {
+ var app *OAuthApp
+ json.NewDecoder(data).Decode(&app)
+ return app
+}
+
+func OAuthAppMapToJson(a map[string]*OAuthApp) string {
+ b, _ := json.Marshal(a)
+ return string(b)
+}
+
+func OAuthAppMapFromJson(data io.Reader) map[string]*OAuthApp {
+ var apps map[string]*OAuthApp
+ json.NewDecoder(data).Decode(&apps)
+ return apps
+}
+
+func OAuthAppListToJson(l []*OAuthApp) string {
+ b, _ := json.Marshal(l)
+ return string(b)
+}
+
+func OAuthAppListFromJson(data io.Reader) []*OAuthApp {
+ var o []*OAuthApp
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/outgoing_webhook.go b/vendor/github.com/mattermost/mattermost-server/model/outgoing_webhook.go
new file mode 100644
index 00000000..b5dbf34d
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/outgoing_webhook.go
@@ -0,0 +1,254 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+type OutgoingWebhook struct {
+ Id string `json:"id"`
+ Token string `json:"token"`
+ CreateAt int64 `json:"create_at"`
+ UpdateAt int64 `json:"update_at"`
+ DeleteAt int64 `json:"delete_at"`
+ CreatorId string `json:"creator_id"`
+ ChannelId string `json:"channel_id"`
+ TeamId string `json:"team_id"`
+ TriggerWords StringArray `json:"trigger_words"`
+ TriggerWhen int `json:"trigger_when"`
+ CallbackURLs StringArray `json:"callback_urls"`
+ DisplayName string `json:"display_name"`
+ Description string `json:"description"`
+ ContentType string `json:"content_type"`
+}
+
+type OutgoingWebhookPayload struct {
+ Token string `json:"token"`
+ TeamId string `json:"team_id"`
+ TeamDomain string `json:"team_domain"`
+ ChannelId string `json:"channel_id"`
+ ChannelName string `json:"channel_name"`
+ Timestamp int64 `json:"timestamp"`
+ UserId string `json:"user_id"`
+ UserName string `json:"user_name"`
+ PostId string `json:"post_id"`
+ Text string `json:"text"`
+ TriggerWord string `json:"trigger_word"`
+ FileIds string `json:"file_ids"`
+}
+
+type OutgoingWebhookResponse struct {
+ Text *string `json:"text"`
+ Username string `json:"username"`
+ IconURL string `json:"icon_url"`
+ Props StringInterface `json:"props"`
+ Attachments []*SlackAttachment `json:"attachments"`
+ Type string `json:"type"`
+ ResponseType string `json:"response_type"`
+}
+
+const OUTGOING_HOOK_RESPONSE_TYPE_COMMENT = "comment"
+
+func (o *OutgoingWebhookPayload) ToJSON() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func (o *OutgoingWebhookPayload) ToFormValues() string {
+ v := url.Values{}
+ v.Set("token", o.Token)
+ v.Set("team_id", o.TeamId)
+ v.Set("team_domain", o.TeamDomain)
+ v.Set("channel_id", o.ChannelId)
+ v.Set("channel_name", o.ChannelName)
+ v.Set("timestamp", strconv.FormatInt(o.Timestamp/1000, 10))
+ v.Set("user_id", o.UserId)
+ v.Set("user_name", o.UserName)
+ v.Set("post_id", o.PostId)
+ v.Set("text", o.Text)
+ v.Set("trigger_word", o.TriggerWord)
+ v.Set("file_ids", o.FileIds)
+
+ return v.Encode()
+}
+
+func (o *OutgoingWebhook) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func OutgoingWebhookFromJson(data io.Reader) *OutgoingWebhook {
+ var o *OutgoingWebhook
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func OutgoingWebhookListToJson(l []*OutgoingWebhook) string {
+ b, _ := json.Marshal(l)
+ return string(b)
+}
+
+func OutgoingWebhookListFromJson(data io.Reader) []*OutgoingWebhook {
+ var o []*OutgoingWebhook
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func (o *OutgoingWebhookResponse) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func OutgoingWebhookResponseFromJson(data io.Reader) *OutgoingWebhookResponse {
+ var o *OutgoingWebhookResponse
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func (o *OutgoingWebhook) IsValid() *AppError {
+
+ if len(o.Id) != 26 {
+ return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.Token) != 26 {
+ return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.token.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if o.CreateAt == 0 {
+ return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if o.UpdateAt == 0 {
+ return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if len(o.CreatorId) != 26 {
+ return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.user_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.ChannelId) != 0 && len(o.ChannelId) != 26 {
+ return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.TeamId) != 26 {
+ return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.team_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(fmt.Sprintf("%s", o.TriggerWords)) > 1024 {
+ return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.words.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.TriggerWords) != 0 {
+ for _, triggerWord := range o.TriggerWords {
+ if len(triggerWord) == 0 {
+ return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.trigger_words.app_error", nil, "", http.StatusBadRequest)
+ }
+ }
+ }
+
+ if len(o.CallbackURLs) == 0 || len(fmt.Sprintf("%s", o.CallbackURLs)) > 1024 {
+ return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.callback.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ for _, callback := range o.CallbackURLs {
+ if !IsValidHttpUrl(callback) {
+ return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.url.app_error", nil, "", http.StatusBadRequest)
+ }
+ }
+
+ if len(o.DisplayName) > 64 {
+ return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.display_name.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.Description) > 128 {
+ return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.description.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.ContentType) > 128 {
+ return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.content_type.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if o.TriggerWhen > 1 {
+ return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.content_type.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (o *OutgoingWebhook) PreSave() {
+ if o.Id == "" {
+ o.Id = NewId()
+ }
+
+ if o.Token == "" {
+ o.Token = NewId()
+ }
+
+ o.CreateAt = GetMillis()
+ o.UpdateAt = o.CreateAt
+}
+
+func (o *OutgoingWebhook) PreUpdate() {
+ o.UpdateAt = GetMillis()
+}
+
+func (o *OutgoingWebhook) TriggerWordExactMatch(word string) bool {
+ if len(word) == 0 {
+ return false
+ }
+
+ for _, trigger := range o.TriggerWords {
+ if trigger == word {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (o *OutgoingWebhook) TriggerWordStartsWith(word string) bool {
+ if len(word) == 0 {
+ return false
+ }
+
+ for _, trigger := range o.TriggerWords {
+ if strings.HasPrefix(word, trigger) {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (o *OutgoingWebhook) GetTriggerWord(word string, isExactMatch bool) (triggerWord string) {
+ if len(word) == 0 {
+ return
+ }
+
+ if isExactMatch {
+ for _, trigger := range o.TriggerWords {
+ if trigger == word {
+ triggerWord = trigger
+ break
+ }
+ }
+ } else {
+ for _, trigger := range o.TriggerWords {
+ if strings.HasPrefix(word, trigger) {
+ triggerWord = trigger
+ break
+ }
+ }
+ }
+
+ return triggerWord
+}
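
A short sketch of the trigger-word helpers above, using the vendored import path shown in this diff:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	hook := &model.OutgoingWebhook{TriggerWords: []string{"build", "deploy"}}

	fmt.Println(hook.TriggerWordExactMatch("build"))     // true
	fmt.Println(hook.TriggerWordStartsWith("deploying")) // true: "deploying" starts with the trigger "deploy"
	fmt.Println(hook.GetTriggerWord("deploying", false)) // "deploy"
	fmt.Println(hook.GetTriggerWord("ship", true))       // "" (no exact match)
}
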
diff --git a/vendor/github.com/mattermost/mattermost-server/model/plugin_key_value.go b/vendor/github.com/mattermost/mattermost-server/model/plugin_key_value.go
new file mode 100644
index 00000000..b7a7731c
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/plugin_key_value.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "net/http"
+ "unicode/utf8"
+)
+
+const (
+ KEY_VALUE_PLUGIN_ID_MAX_RUNES = 190
+ KEY_VALUE_KEY_MAX_RUNES = 50
+)
+
+type PluginKeyValue struct {
+ PluginId string `json:"plugin_id"`
+ Key string `json:"key" db:"PKey"`
+ Value []byte `json:"value" db:"PValue"`
+}
+
+func (kv *PluginKeyValue) IsValid() *AppError {
+ if len(kv.PluginId) == 0 || utf8.RuneCountInString(kv.PluginId) > KEY_VALUE_PLUGIN_ID_MAX_RUNES {
+		return NewAppError("PluginKeyValue.IsValid", "model.plugin_key_value.is_valid.plugin_id.app_error", map[string]interface{}{"Max": KEY_VALUE_PLUGIN_ID_MAX_RUNES, "Min": 0}, "key="+kv.Key, http.StatusBadRequest)
+ }
+
+ if len(kv.Key) == 0 || utf8.RuneCountInString(kv.Key) > KEY_VALUE_KEY_MAX_RUNES {
+ return NewAppError("PluginKeyValue.IsValid", "model.plugin_key_value.is_valid.key.app_error", map[string]interface{}{"Max": KEY_VALUE_KEY_MAX_RUNES, "Min": 0}, "key="+kv.Key, http.StatusBadRequest)
+ }
+
+ return nil
+}
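
For illustration only, a sketch of how IsValid above might be exercised; the plugin id, key, and value are made-up:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	kv := &model.PluginKeyValue{
		PluginId: "com.example.demo", // hypothetical plugin id
		Key:      "install_count",
		Value:    []byte(`{"count": 3}`),
	}

	if appErr := kv.IsValid(); appErr != nil {
		fmt.Println("invalid:", appErr)
		return
	}
	fmt.Println("valid key for plugin", kv.PluginId)
}
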
diff --git a/vendor/github.com/mattermost/mattermost-server/model/plugins_response.go b/vendor/github.com/mattermost/mattermost-server/model/plugins_response.go
new file mode 100644
index 00000000..b6c01b64
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/plugins_response.go
@@ -0,0 +1,30 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type PluginInfo struct {
+ Manifest
+ Prepackaged bool `json:"prepackaged"`
+}
+
+type PluginsResponse struct {
+ Active []*PluginInfo `json:"active"`
+ Inactive []*PluginInfo `json:"inactive"`
+}
+
+func (m *PluginsResponse) ToJson() string {
+ b, _ := json.Marshal(m)
+ return string(b)
+}
+
+func PluginsResponseFromJson(data io.Reader) *PluginsResponse {
+ var m *PluginsResponse
+ json.NewDecoder(data).Decode(&m)
+ return m
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/post.go b/vendor/github.com/mattermost/mattermost-server/model/post.go
new file mode 100644
index 00000000..4a774b5d
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/post.go
@@ -0,0 +1,492 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "regexp"
+ "sort"
+ "strings"
+ "unicode/utf8"
+
+ "github.com/mattermost/mattermost-server/utils/markdown"
+)
+
+const (
+ POST_SYSTEM_MESSAGE_PREFIX = "system_"
+ POST_DEFAULT = ""
+ POST_SLACK_ATTACHMENT = "slack_attachment"
+ POST_SYSTEM_GENERIC = "system_generic"
+ POST_JOIN_LEAVE = "system_join_leave" // Deprecated, use POST_JOIN_CHANNEL or POST_LEAVE_CHANNEL instead
+ POST_JOIN_CHANNEL = "system_join_channel"
+ POST_LEAVE_CHANNEL = "system_leave_channel"
+ POST_JOIN_TEAM = "system_join_team"
+ POST_LEAVE_TEAM = "system_leave_team"
+ POST_ADD_REMOVE = "system_add_remove" // Deprecated, use POST_ADD_TO_CHANNEL or POST_REMOVE_FROM_CHANNEL instead
+ POST_ADD_TO_CHANNEL = "system_add_to_channel"
+ POST_REMOVE_FROM_CHANNEL = "system_remove_from_channel"
+ POST_MOVE_CHANNEL = "system_move_channel"
+ POST_ADD_TO_TEAM = "system_add_to_team"
+ POST_REMOVE_FROM_TEAM = "system_remove_from_team"
+ POST_HEADER_CHANGE = "system_header_change"
+ POST_DISPLAYNAME_CHANGE = "system_displayname_change"
+ POST_PURPOSE_CHANGE = "system_purpose_change"
+ POST_CHANNEL_DELETED = "system_channel_deleted"
+ POST_EPHEMERAL = "system_ephemeral"
+ POST_CHANGE_CHANNEL_PRIVACY = "system_change_chan_privacy"
+ POST_FILEIDS_MAX_RUNES = 150
+ POST_FILENAMES_MAX_RUNES = 4000
+ POST_HASHTAGS_MAX_RUNES = 1000
+ POST_MESSAGE_MAX_RUNES = 4000
+ POST_PROPS_MAX_RUNES = 8000
+ POST_PROPS_MAX_USER_RUNES = POST_PROPS_MAX_RUNES - 400 // Leave some room for system / pre-save modifications
+ POST_CUSTOM_TYPE_PREFIX = "custom_"
+ PROPS_ADD_CHANNEL_MEMBER = "add_channel_member"
+)
+
+type Post struct {
+ Id string `json:"id"`
+ CreateAt int64 `json:"create_at"`
+ UpdateAt int64 `json:"update_at"`
+ EditAt int64 `json:"edit_at"`
+ DeleteAt int64 `json:"delete_at"`
+ IsPinned bool `json:"is_pinned"`
+ UserId string `json:"user_id"`
+ ChannelId string `json:"channel_id"`
+ RootId string `json:"root_id"`
+ ParentId string `json:"parent_id"`
+ OriginalId string `json:"original_id"`
+
+ Message string `json:"message"`
+
+ // MessageSource will contain the message as submitted by the user if Message has been modified
+ // by Mattermost for presentation (e.g if an image proxy is being used). It should be used to
+ // populate edit boxes if present.
+ MessageSource string `json:"message_source,omitempty" db:"-"`
+
+ Type string `json:"type"`
+ Props StringInterface `json:"props"`
+ Hashtags string `json:"hashtags"`
+ Filenames StringArray `json:"filenames,omitempty"` // Deprecated, do not use this field any more
+ FileIds StringArray `json:"file_ids,omitempty"`
+ PendingPostId string `json:"pending_post_id" db:"-"`
+ HasReactions bool `json:"has_reactions,omitempty"`
+}
+
+type PostPatch struct {
+ IsPinned *bool `json:"is_pinned"`
+ Message *string `json:"message"`
+ Props *StringInterface `json:"props"`
+ FileIds *StringArray `json:"file_ids"`
+ HasReactions *bool `json:"has_reactions"`
+}
+
+func (o *PostPatch) WithRewrittenImageURLs(f func(string) string) *PostPatch {
+	copy := *o
+	if o.Message != nil {
+		// Allocate a new string so the original patch is not mutated through the shared pointer.
+		message := RewriteImageURLs(*o.Message, f)
+		copy.Message = &message
+	}
+	return &copy
+}
+
+type PostForIndexing struct {
+ Post
+ TeamId string `json:"team_id"`
+ ParentCreateAt *int64 `json:"parent_create_at"`
+}
+
+type PostAction struct {
+ Id string `json:"id"`
+ Name string `json:"name"`
+ Integration *PostActionIntegration `json:"integration,omitempty"`
+}
+
+type PostActionIntegration struct {
+ URL string `json:"url,omitempty"`
+ Context StringInterface `json:"context,omitempty"`
+}
+
+type PostActionIntegrationRequest struct {
+ UserId string `json:"user_id"`
+ Context StringInterface `json:"context,omitempty"`
+}
+
+type PostActionIntegrationResponse struct {
+ Update *Post `json:"update"`
+ EphemeralText string `json:"ephemeral_text"`
+}
+
+func (o *Post) ToJson() string {
+ copy := *o
+ copy.StripActionIntegrations()
+ b, _ := json.Marshal(&copy)
+ return string(b)
+}
+
+func (o *Post) ToUnsanitizedJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func PostFromJson(data io.Reader) *Post {
+ var o *Post
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func (o *Post) Etag() string {
+ return Etag(o.Id, o.UpdateAt)
+}
+
+func (o *Post) IsValid() *AppError {
+
+ if len(o.Id) != 26 {
+ return NewAppError("Post.IsValid", "model.post.is_valid.id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if o.CreateAt == 0 {
+ return NewAppError("Post.IsValid", "model.post.is_valid.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if o.UpdateAt == 0 {
+ return NewAppError("Post.IsValid", "model.post.is_valid.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if len(o.UserId) != 26 {
+ return NewAppError("Post.IsValid", "model.post.is_valid.user_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.ChannelId) != 26 {
+ return NewAppError("Post.IsValid", "model.post.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if !(len(o.RootId) == 26 || len(o.RootId) == 0) {
+ return NewAppError("Post.IsValid", "model.post.is_valid.root_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if !(len(o.ParentId) == 26 || len(o.ParentId) == 0) {
+ return NewAppError("Post.IsValid", "model.post.is_valid.parent_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.ParentId) == 26 && len(o.RootId) == 0 {
+ return NewAppError("Post.IsValid", "model.post.is_valid.root_parent.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if !(len(o.OriginalId) == 26 || len(o.OriginalId) == 0) {
+ return NewAppError("Post.IsValid", "model.post.is_valid.original_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if utf8.RuneCountInString(o.Message) > POST_MESSAGE_MAX_RUNES {
+ return NewAppError("Post.IsValid", "model.post.is_valid.msg.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if utf8.RuneCountInString(o.Hashtags) > POST_HASHTAGS_MAX_RUNES {
+ return NewAppError("Post.IsValid", "model.post.is_valid.hashtags.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ switch o.Type {
+ case
+ POST_DEFAULT,
+ POST_JOIN_LEAVE,
+ POST_ADD_REMOVE,
+ POST_JOIN_CHANNEL,
+ POST_LEAVE_CHANNEL,
+ POST_JOIN_TEAM,
+ POST_LEAVE_TEAM,
+ POST_ADD_TO_CHANNEL,
+ POST_REMOVE_FROM_CHANNEL,
+ POST_MOVE_CHANNEL,
+ POST_ADD_TO_TEAM,
+ POST_REMOVE_FROM_TEAM,
+ POST_SLACK_ATTACHMENT,
+ POST_HEADER_CHANGE,
+ POST_PURPOSE_CHANGE,
+ POST_DISPLAYNAME_CHANGE,
+ POST_CHANNEL_DELETED,
+ POST_CHANGE_CHANNEL_PRIVACY:
+ default:
+ if !strings.HasPrefix(o.Type, POST_CUSTOM_TYPE_PREFIX) {
+ return NewAppError("Post.IsValid", "model.post.is_valid.type.app_error", nil, "id="+o.Type, http.StatusBadRequest)
+ }
+ }
+
+ if utf8.RuneCountInString(ArrayToJson(o.Filenames)) > POST_FILENAMES_MAX_RUNES {
+ return NewAppError("Post.IsValid", "model.post.is_valid.filenames.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if utf8.RuneCountInString(ArrayToJson(o.FileIds)) > POST_FILEIDS_MAX_RUNES {
+ return NewAppError("Post.IsValid", "model.post.is_valid.file_ids.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if utf8.RuneCountInString(StringInterfaceToJson(o.Props)) > POST_PROPS_MAX_RUNES {
+ return NewAppError("Post.IsValid", "model.post.is_valid.props.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (o *Post) SanitizeProps() {
+ membersToSanitize := []string{
+ PROPS_ADD_CHANNEL_MEMBER,
+ }
+
+ for _, member := range membersToSanitize {
+ if _, ok := o.Props[member]; ok {
+ delete(o.Props, member)
+ }
+ }
+}
+
+func (o *Post) PreSave() {
+ if o.Id == "" {
+ o.Id = NewId()
+ }
+
+ o.OriginalId = ""
+
+ if o.CreateAt == 0 {
+ o.CreateAt = GetMillis()
+ }
+
+ o.UpdateAt = o.CreateAt
+ o.PreCommit()
+}
+
+func (o *Post) PreCommit() {
+ if o.Props == nil {
+ o.Props = make(map[string]interface{})
+ }
+
+ if o.Filenames == nil {
+ o.Filenames = []string{}
+ }
+
+ if o.FileIds == nil {
+ o.FileIds = []string{}
+ }
+
+ o.GenerateActionIds()
+}
+
+func (o *Post) MakeNonNil() {
+ if o.Props == nil {
+ o.Props = make(map[string]interface{})
+ }
+}
+
+func (o *Post) AddProp(key string, value interface{}) {
+
+ o.MakeNonNil()
+
+ o.Props[key] = value
+}
+
+func (o *Post) IsSystemMessage() bool {
+ return len(o.Type) >= len(POST_SYSTEM_MESSAGE_PREFIX) && o.Type[:len(POST_SYSTEM_MESSAGE_PREFIX)] == POST_SYSTEM_MESSAGE_PREFIX
+}
+
+func (p *Post) Patch(patch *PostPatch) {
+ if patch.IsPinned != nil {
+ p.IsPinned = *patch.IsPinned
+ }
+
+ if patch.Message != nil {
+ p.Message = *patch.Message
+ }
+
+ if patch.Props != nil {
+ p.Props = *patch.Props
+ }
+
+ if patch.FileIds != nil {
+ p.FileIds = *patch.FileIds
+ }
+
+ if patch.HasReactions != nil {
+ p.HasReactions = *patch.HasReactions
+ }
+}
+
+func (o *PostPatch) ToJson() string {
+ b, err := json.Marshal(o)
+ if err != nil {
+ return ""
+ }
+
+ return string(b)
+}
+
+func PostPatchFromJson(data io.Reader) *PostPatch {
+ decoder := json.NewDecoder(data)
+ var post PostPatch
+ err := decoder.Decode(&post)
+ if err != nil {
+ return nil
+ }
+
+ return &post
+}
+
+var channelMentionRegexp = regexp.MustCompile(`\B~[a-zA-Z0-9\-_]+`)
+
+func (o *Post) ChannelMentions() (names []string) {
+ if strings.Contains(o.Message, "~") {
+ alreadyMentioned := make(map[string]bool)
+ for _, match := range channelMentionRegexp.FindAllString(o.Message, -1) {
+ name := match[1:]
+ if !alreadyMentioned[name] {
+ names = append(names, name)
+ alreadyMentioned[name] = true
+ }
+ }
+ }
+ return
+}
+
+func (r *PostActionIntegrationRequest) ToJson() string {
+ b, _ := json.Marshal(r)
+ return string(b)
+}
+
+func (o *Post) Attachments() []*SlackAttachment {
+ if attachments, ok := o.Props["attachments"].([]*SlackAttachment); ok {
+ return attachments
+ }
+ var ret []*SlackAttachment
+ if attachments, ok := o.Props["attachments"].([]interface{}); ok {
+ for _, attachment := range attachments {
+ if enc, err := json.Marshal(attachment); err == nil {
+ var decoded SlackAttachment
+ if json.Unmarshal(enc, &decoded) == nil {
+ ret = append(ret, &decoded)
+ }
+ }
+ }
+ }
+ return ret
+}
+
+func (o *Post) StripActionIntegrations() {
+ attachments := o.Attachments()
+ if o.Props["attachments"] != nil {
+ o.Props["attachments"] = attachments
+ }
+ for _, attachment := range attachments {
+ for _, action := range attachment.Actions {
+ action.Integration = nil
+ }
+ }
+}
+
+func (o *Post) GetAction(id string) *PostAction {
+ for _, attachment := range o.Attachments() {
+ for _, action := range attachment.Actions {
+ if action.Id == id {
+ return action
+ }
+ }
+ }
+ return nil
+}
+
+func (o *Post) GenerateActionIds() {
+ if o.Props["attachments"] != nil {
+ o.Props["attachments"] = o.Attachments()
+ }
+ if attachments, ok := o.Props["attachments"].([]*SlackAttachment); ok {
+ for _, attachment := range attachments {
+ for _, action := range attachment.Actions {
+ if action.Id == "" {
+ action.Id = NewId()
+ }
+ }
+ }
+ }
+}
+
+var markdownDestinationEscaper = strings.NewReplacer(
+ `\`, `\\`,
+ `<`, `\<`,
+ `>`, `\>`,
+ `(`, `\(`,
+ `)`, `\)`,
+)
+
+// WithRewrittenImageURLs returns a new shallow copy of the post where the message has been
+// rewritten via RewriteImageURLs.
+func (o *Post) WithRewrittenImageURLs(f func(string) string) *Post {
+ copy := *o
+ copy.Message = RewriteImageURLs(o.Message, f)
+ if copy.MessageSource == "" && copy.Message != o.Message {
+ copy.MessageSource = o.Message
+ }
+ return &copy
+}
+
+// RewriteImageURLs takes a message and returns a copy that has all of the image URLs replaced
+// according to the function f. For each image URL, f will be invoked, and the resulting markdown
+// will contain the URL returned by that invocation instead.
+//
+// Image URLs are destination URLs used in inline images or reference definitions that are used
+// anywhere in the input markdown as an image.
+func RewriteImageURLs(message string, f func(string) string) string {
+ if !strings.Contains(message, "![") {
+ return message
+ }
+
+ var ranges []markdown.Range
+
+ markdown.Inspect(message, func(blockOrInline interface{}) bool {
+ switch v := blockOrInline.(type) {
+ case *markdown.ReferenceImage:
+ ranges = append(ranges, v.ReferenceDefinition.RawDestination)
+ case *markdown.InlineImage:
+ ranges = append(ranges, v.RawDestination)
+ default:
+ return true
+ }
+ return true
+ })
+
+ if ranges == nil {
+ return message
+ }
+
+ sort.Slice(ranges, func(i, j int) bool {
+ return ranges[i].Position < ranges[j].Position
+ })
+
+ copyRanges := make([]markdown.Range, 0, len(ranges))
+ urls := make([]string, 0, len(ranges))
+ resultLength := len(message)
+
+ start := 0
+ for i, r := range ranges {
+ switch {
+ case i == 0:
+ case r.Position != ranges[i-1].Position:
+ start = ranges[i-1].End
+ default:
+ continue
+ }
+ original := message[r.Position:r.End]
+ replacement := markdownDestinationEscaper.Replace(f(markdown.Unescape(original)))
+ resultLength += len(replacement) - len(original)
+ copyRanges = append(copyRanges, markdown.Range{Position: start, End: r.Position})
+ urls = append(urls, replacement)
+ }
+
+ result := make([]byte, resultLength)
+
+ offset := 0
+ for i, r := range copyRanges {
+ offset += copy(result[offset:], message[r.Position:r.End])
+ offset += copy(result[offset:], urls[i])
+ }
+ copy(result[offset:], message[ranges[len(ranges)-1].End:])
+
+ return string(result)
+}
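
A small sketch of the channel-mention and patch helpers above:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	post := &model.Post{Message: "notes are in ~town-square and ~release-planning"}
	fmt.Println(post.ChannelMentions()) // [town-square release-planning]

	newMessage := "notes moved to ~release-planning"
	pinned := true
	post.Patch(&model.PostPatch{Message: &newMessage, IsPinned: &pinned})
	fmt.Println(post.IsPinned, post.Message) // true, plus the new message
}
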
diff --git a/vendor/github.com/mattermost/mattermost-server/model/post_list.go b/vendor/github.com/mattermost/mattermost-server/model/post_list.go
new file mode 100644
index 00000000..27c22e7b
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/post_list.go
@@ -0,0 +1,138 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "sort"
+)
+
+type PostList struct {
+ Order []string `json:"order"`
+ Posts map[string]*Post `json:"posts"`
+}
+
+func NewPostList() *PostList {
+ return &PostList{
+ Order: make([]string, 0),
+ Posts: make(map[string]*Post),
+ }
+}
+
+func (o *PostList) WithRewrittenImageURLs(f func(string) string) *PostList {
+ copy := *o
+ copy.Posts = make(map[string]*Post)
+ for id, post := range o.Posts {
+ copy.Posts[id] = post.WithRewrittenImageURLs(f)
+ }
+ return &copy
+}
+
+func (o *PostList) StripActionIntegrations() {
+ posts := o.Posts
+ o.Posts = make(map[string]*Post)
+ for id, post := range posts {
+ pcopy := *post
+ pcopy.StripActionIntegrations()
+ o.Posts[id] = &pcopy
+ }
+}
+
+func (o *PostList) ToJson() string {
+ copy := *o
+ copy.StripActionIntegrations()
+ b, err := json.Marshal(&copy)
+ if err != nil {
+ return ""
+ } else {
+ return string(b)
+ }
+}
+
+func (o *PostList) MakeNonNil() {
+ if o.Order == nil {
+ o.Order = make([]string, 0)
+ }
+
+ if o.Posts == nil {
+ o.Posts = make(map[string]*Post)
+ }
+
+ for _, v := range o.Posts {
+ v.MakeNonNil()
+ }
+}
+
+func (o *PostList) AddOrder(id string) {
+
+ if o.Order == nil {
+ o.Order = make([]string, 0, 128)
+ }
+
+ o.Order = append(o.Order, id)
+}
+
+func (o *PostList) AddPost(post *Post) {
+
+ if o.Posts == nil {
+ o.Posts = make(map[string]*Post)
+ }
+
+ o.Posts[post.Id] = post
+}
+
+func (o *PostList) Extend(other *PostList) {
+ for _, postId := range other.Order {
+ if _, ok := o.Posts[postId]; !ok {
+ o.AddPost(other.Posts[postId])
+ o.AddOrder(postId)
+ }
+ }
+}
+
+func (o *PostList) SortByCreateAt() {
+ sort.Slice(o.Order, func(i, j int) bool {
+ return o.Posts[o.Order[i]].CreateAt > o.Posts[o.Order[j]].CreateAt
+ })
+}
+
+func (o *PostList) Etag() string {
+
+ id := "0"
+ var t int64 = 0
+
+ for _, v := range o.Posts {
+ if v.UpdateAt > t {
+ t = v.UpdateAt
+ id = v.Id
+ } else if v.UpdateAt == t && v.Id > id {
+ t = v.UpdateAt
+ id = v.Id
+ }
+ }
+
+ orderId := ""
+ if len(o.Order) > 0 {
+ orderId = o.Order[0]
+ }
+
+ return Etag(orderId, id, t)
+}
+
+func (o *PostList) IsChannelId(channelId string) bool {
+ for _, v := range o.Posts {
+ if v.ChannelId != channelId {
+ return false
+ }
+ }
+
+ return true
+}
+
+func PostListFromJson(data io.Reader) *PostList {
+ var o *PostList
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
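
A sketch of building and ordering a PostList with the helpers above:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	list := model.NewPostList()

	for _, msg := range []string{"first", "second"} {
		post := &model.Post{Message: msg}
		post.PreSave() // assigns an Id and CreateAt
		list.AddPost(post)
		list.AddOrder(post.Id)
	}

	list.SortByCreateAt() // most recent CreateAt first
	fmt.Println(len(list.Order), len(list.Posts)) // 2 2
	fmt.Println(list.Etag())
}
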
diff --git a/vendor/github.com/mattermost/mattermost-server/model/preference.go b/vendor/github.com/mattermost/mattermost-server/model/preference.go
new file mode 100644
index 00000000..dc97314c
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/preference.go
@@ -0,0 +1,113 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+ PREFERENCE_CATEGORY_DIRECT_CHANNEL_SHOW = "direct_channel_show"
+ PREFERENCE_CATEGORY_TUTORIAL_STEPS = "tutorial_step"
+ PREFERENCE_CATEGORY_ADVANCED_SETTINGS = "advanced_settings"
+ PREFERENCE_CATEGORY_FLAGGED_POST = "flagged_post"
+ PREFERENCE_CATEGORY_FAVORITE_CHANNEL = "favorite_channel"
+
+ PREFERENCE_CATEGORY_DISPLAY_SETTINGS = "display_settings"
+ PREFERENCE_NAME_COLLAPSE_SETTING = "collapse_previews"
+
+ PREFERENCE_CATEGORY_THEME = "theme"
+ // the name for theme props is the team id
+
+ PREFERENCE_CATEGORY_AUTHORIZED_OAUTH_APP = "oauth_app"
+ // the name for oauth_app is the client_id and value is the current scope
+
+ PREFERENCE_CATEGORY_LAST = "last"
+ PREFERENCE_NAME_LAST_CHANNEL = "channel"
+ PREFERENCE_NAME_LAST_TEAM = "team"
+
+ PREFERENCE_CATEGORY_NOTIFICATIONS = "notifications"
+ PREFERENCE_NAME_EMAIL_INTERVAL = "email_interval"
+
+ PREFERENCE_EMAIL_INTERVAL_NO_BATCHING_SECONDS = "30" // the "immediate" setting is actually 30s
+ PREFERENCE_EMAIL_INTERVAL_BATCHING_SECONDS = "900" // fifteen minutes is 900 seconds
+)
+
+type Preference struct {
+ UserId string `json:"user_id"`
+ Category string `json:"category"`
+ Name string `json:"name"`
+ Value string `json:"value"`
+}
+
+func (o *Preference) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func PreferenceFromJson(data io.Reader) *Preference {
+ var o *Preference
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func (o *Preference) IsValid() *AppError {
+ if len(o.UserId) != 26 {
+ return NewAppError("Preference.IsValid", "model.preference.is_valid.id.app_error", nil, "user_id="+o.UserId, http.StatusBadRequest)
+ }
+
+ if len(o.Category) == 0 || len(o.Category) > 32 {
+ return NewAppError("Preference.IsValid", "model.preference.is_valid.category.app_error", nil, "category="+o.Category, http.StatusBadRequest)
+ }
+
+ if len(o.Name) > 32 {
+ return NewAppError("Preference.IsValid", "model.preference.is_valid.name.app_error", nil, "name="+o.Name, http.StatusBadRequest)
+ }
+
+ if utf8.RuneCountInString(o.Value) > 2000 {
+ return NewAppError("Preference.IsValid", "model.preference.is_valid.value.app_error", nil, "value="+o.Value, http.StatusBadRequest)
+ }
+
+ if o.Category == PREFERENCE_CATEGORY_THEME {
+ var unused map[string]string
+ if err := json.NewDecoder(strings.NewReader(o.Value)).Decode(&unused); err != nil {
+ return NewAppError("Preference.IsValid", "model.preference.is_valid.theme.app_error", nil, "value="+o.Value, http.StatusBadRequest)
+ }
+ }
+
+ return nil
+}
+
+func (o *Preference) PreUpdate() {
+ if o.Category == PREFERENCE_CATEGORY_THEME {
+		// decode the value of theme (a map of strings to strings) and eliminate any invalid values
+ var props map[string]string
+ if err := json.NewDecoder(strings.NewReader(o.Value)).Decode(&props); err != nil {
+ // just continue, the invalid preference value should get caught by IsValid before saving
+ return
+ }
+
+ colorPattern := regexp.MustCompile(`^#[0-9a-fA-F]{3}([0-9a-fA-F]{3})?$`)
+
+ // blank out any invalid theme values
+ for name, value := range props {
+ if name == "image" || name == "type" || name == "codeTheme" {
+ continue
+ }
+
+ if !colorPattern.MatchString(value) {
+ props[name] = "#ffffff"
+ }
+ }
+
+ if b, err := json.Marshal(props); err == nil {
+ o.Value = string(b)
+ }
+ }
+}
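
A sketch of the theme sanitization performed by PreUpdate above; the color values are made up:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	p := model.Preference{
		UserId:   model.NewId(),
		Category: model.PREFERENCE_CATEGORY_THEME,
		Name:     model.NewId(), // theme preferences are keyed by team id
		Value:    `{"sidebarBg":"#2071a7","centerChannelBg":"not-a-color","type":"Mattermost"}`,
	}

	p.PreUpdate() // invalid color values become #ffffff; "image", "type" and "codeTheme" are left alone
	fmt.Println(p.Value)
	fmt.Println(p.IsValid() == nil) // true
}
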
diff --git a/vendor/github.com/mattermost/mattermost-server/model/preferences.go b/vendor/github.com/mattermost/mattermost-server/model/preferences.go
new file mode 100644
index 00000000..172e1aa8
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/preferences.go
@@ -0,0 +1,27 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type Preferences []Preference
+
+func (o *Preferences) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func PreferencesFromJson(data io.Reader) (Preferences, error) {
+ decoder := json.NewDecoder(data)
+ var o Preferences
+ err := decoder.Decode(&o)
+ if err == nil {
+ return o, nil
+ } else {
+ return nil, err
+ }
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/push_notification.go b/vendor/github.com/mattermost/mattermost-server/model/push_notification.go
new file mode 100644
index 00000000..0d7ba77a
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/push_notification.go
@@ -0,0 +1,68 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "strings"
+)
+
+const (
+ PUSH_NOTIFY_APPLE = "apple"
+ PUSH_NOTIFY_ANDROID = "android"
+ PUSH_NOTIFY_APPLE_REACT_NATIVE = "apple_rn"
+ PUSH_NOTIFY_ANDROID_REACT_NATIVE = "android_rn"
+
+ PUSH_TYPE_MESSAGE = "message"
+ PUSH_TYPE_CLEAR = "clear"
+
+	// The category is set to handle a set of interactive Actions
+	// sent with the push notification
+ CATEGORY_CAN_REPLY = "CAN_REPLY"
+
+ MHPNS = "https://push.mattermost.com"
+)
+
+type PushNotification struct {
+ Platform string `json:"platform"`
+ ServerId string `json:"server_id"`
+ DeviceId string `json:"device_id"`
+ Category string `json:"category"`
+ Sound string `json:"sound"`
+ Message string `json:"message"`
+ Badge int `json:"badge"`
+ ContentAvailable int `json:"cont_ava"`
+ TeamId string `json:"team_id"`
+ ChannelId string `json:"channel_id"`
+ PostId string `json:"post_id"`
+ RootId string `json:"root_id"`
+ ChannelName string `json:"channel_name"`
+ Type string `json:"type"`
+ SenderId string `json:"sender_id"`
+ OverrideUsername string `json:"override_username"`
+ OverrideIconUrl string `json:"override_icon_url"`
+ FromWebhook string `json:"from_webhook"`
+}
+
+func (me *PushNotification) ToJson() string {
+ b, _ := json.Marshal(me)
+ return string(b)
+}
+
+func (me *PushNotification) SetDeviceIdAndPlatform(deviceId string) {
+
+ index := strings.Index(deviceId, ":")
+
+ if index > -1 {
+ me.Platform = deviceId[:index]
+ me.DeviceId = deviceId[index+1:]
+ }
+}
+
+func PushNotificationFromJson(data io.Reader) *PushNotification {
+ var me *PushNotification
+ json.NewDecoder(data).Decode(&me)
+ return me
+}
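
SetDeviceIdAndPlatform above splits a combined "platform:device_id" string; a quick sketch (the token is made up):

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	n := model.PushNotification{Type: model.PUSH_TYPE_MESSAGE}
	n.SetDeviceIdAndPlatform("android_rn:some-device-token")

	fmt.Println(n.Platform) // android_rn
	fmt.Println(n.DeviceId) // some-device-token
	fmt.Println(n.ToJson())
}
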
diff --git a/vendor/github.com/mattermost/mattermost-server/model/push_response.go b/vendor/github.com/mattermost/mattermost-server/model/push_response.go
new file mode 100644
index 00000000..1434a2b1
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/push_response.go
@@ -0,0 +1,54 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+const (
+ PUSH_STATUS = "status"
+ PUSH_STATUS_OK = "OK"
+ PUSH_STATUS_FAIL = "FAIL"
+ PUSH_STATUS_REMOVE = "REMOVE"
+ PUSH_STATUS_ERROR_MSG = "error"
+)
+
+type PushResponse map[string]string
+
+func NewOkPushResponse() PushResponse {
+ m := make(map[string]string)
+ m[PUSH_STATUS] = PUSH_STATUS_OK
+ return m
+}
+
+func NewRemovePushResponse() PushResponse {
+ m := make(map[string]string)
+ m[PUSH_STATUS] = PUSH_STATUS_REMOVE
+ return m
+}
+
+func NewErrorPushResponse(message string) PushResponse {
+ m := make(map[string]string)
+ m[PUSH_STATUS] = PUSH_STATUS_FAIL
+ m[PUSH_STATUS_ERROR_MSG] = message
+ return m
+}
+
+func (me *PushResponse) ToJson() string {
+ b, _ := json.Marshal(me)
+ return string(b)
+}
+
+func PushResponseFromJson(data io.Reader) PushResponse {
+ decoder := json.NewDecoder(data)
+
+ var objmap PushResponse
+ if err := decoder.Decode(&objmap); err != nil {
+ return make(map[string]string)
+ } else {
+ return objmap
+ }
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/reaction.go b/vendor/github.com/mattermost/mattermost-server/model/reaction.go
new file mode 100644
index 00000000..c1b9c499
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/reaction.go
@@ -0,0 +1,76 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "regexp"
+)
+
+type Reaction struct {
+ UserId string `json:"user_id"`
+ PostId string `json:"post_id"`
+ EmojiName string `json:"emoji_name"`
+ CreateAt int64 `json:"create_at"`
+}
+
+func (o *Reaction) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func ReactionFromJson(data io.Reader) *Reaction {
+ var o Reaction
+
+ if err := json.NewDecoder(data).Decode(&o); err != nil {
+ return nil
+ } else {
+ return &o
+ }
+}
+
+func ReactionsToJson(o []*Reaction) string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func ReactionsFromJson(data io.Reader) []*Reaction {
+ var o []*Reaction
+
+ if err := json.NewDecoder(data).Decode(&o); err != nil {
+ return nil
+ } else {
+ return o
+ }
+}
+
+func (o *Reaction) IsValid() *AppError {
+ if len(o.UserId) != 26 {
+ return NewAppError("Reaction.IsValid", "model.reaction.is_valid.user_id.app_error", nil, "user_id="+o.UserId, http.StatusBadRequest)
+ }
+
+ if len(o.PostId) != 26 {
+ return NewAppError("Reaction.IsValid", "model.reaction.is_valid.post_id.app_error", nil, "post_id="+o.PostId, http.StatusBadRequest)
+ }
+
+ validName := regexp.MustCompile(`^[a-zA-Z0-9\-\+_]+$`)
+
+ if len(o.EmojiName) == 0 || len(o.EmojiName) > EMOJI_NAME_MAX_LENGTH || !validName.MatchString(o.EmojiName) {
+ return NewAppError("Reaction.IsValid", "model.reaction.is_valid.emoji_name.app_error", nil, "emoji_name="+o.EmojiName, http.StatusBadRequest)
+ }
+
+ if o.CreateAt == 0 {
+ return NewAppError("Reaction.IsValid", "model.reaction.is_valid.create_at.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (o *Reaction) PreSave() {
+ if o.CreateAt == 0 {
+ o.CreateAt = GetMillis()
+ }
+}
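
A sketch of creating and validating a Reaction with the helpers above (EMOJI_NAME_MAX_LENGTH is defined alongside the emoji model elsewhere in this package):

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	r := &model.Reaction{
		UserId:    model.NewId(),
		PostId:    model.NewId(),
		EmojiName: "thumbsup",
	}
	r.PreSave() // stamps CreateAt if unset

	fmt.Println(r.IsValid() == nil) // true
	fmt.Println(r.ToJson())
}
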
diff --git a/vendor/github.com/mattermost/mattermost-server/model/saml.go b/vendor/github.com/mattermost/mattermost-server/model/saml.go
new file mode 100644
index 00000000..e7475015
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/saml.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+const (
+ USER_AUTH_SERVICE_SAML = "saml"
+ USER_AUTH_SERVICE_SAML_TEXT = "With SAML"
+ SAML_IDP_CERTIFICATE = 1
+ SAML_PRIVATE_KEY = 2
+ SAML_PUBLIC_CERT = 3
+)
+
+type SamlAuthRequest struct {
+ Base64AuthRequest string
+ URL string
+ RelayState string
+}
+
+type SamlCertificateStatus struct {
+ IdpCertificateFile bool `json:"idp_certificate_file"`
+ PrivateKeyFile bool `json:"private_key_file"`
+ PublicCertificateFile bool `json:"public_certificate_file"`
+}
+
+func (s *SamlCertificateStatus) ToJson() string {
+ b, _ := json.Marshal(s)
+ return string(b)
+}
+
+func SamlCertificateStatusFromJson(data io.Reader) *SamlCertificateStatus {
+ var status *SamlCertificateStatus
+ json.NewDecoder(data).Decode(&status)
+ return status
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/scheduled_task.go b/vendor/github.com/mattermost/mattermost-server/model/scheduled_task.go
new file mode 100644
index 00000000..453828bd
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/scheduled_task.go
@@ -0,0 +1,110 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "fmt"
+ "sync"
+ "time"
+)
+
+type TaskFunc func()
+
+type ScheduledTask struct {
+ Name string `json:"name"`
+ Interval time.Duration `json:"interval"`
+ Recurring bool `json:"recurring"`
+ function TaskFunc
+ timer *time.Timer
+}
+
+var taskMutex = sync.Mutex{}
+var tasks = make(map[string]*ScheduledTask)
+
+func addTask(task *ScheduledTask) {
+ taskMutex.Lock()
+ defer taskMutex.Unlock()
+ tasks[task.Name] = task
+}
+
+func removeTaskByName(name string) {
+ taskMutex.Lock()
+ defer taskMutex.Unlock()
+ delete(tasks, name)
+}
+
+func GetTaskByName(name string) *ScheduledTask {
+ taskMutex.Lock()
+ defer taskMutex.Unlock()
+ if task, ok := tasks[name]; ok {
+ return task
+ }
+ return nil
+}
+
+func GetAllTasks() *map[string]*ScheduledTask {
+ taskMutex.Lock()
+ defer taskMutex.Unlock()
+ return &tasks
+}
+
+func CreateTask(name string, function TaskFunc, timeToExecution time.Duration) *ScheduledTask {
+ task := &ScheduledTask{
+ Name: name,
+ Interval: timeToExecution,
+ Recurring: false,
+ function: function,
+ }
+
+ taskRunner := func() {
+ go task.function()
+ removeTaskByName(task.Name)
+ }
+
+ task.timer = time.AfterFunc(timeToExecution, taskRunner)
+
+ addTask(task)
+
+ return task
+}
+
+func CreateRecurringTask(name string, function TaskFunc, interval time.Duration) *ScheduledTask {
+ task := &ScheduledTask{
+ Name: name,
+ Interval: interval,
+ Recurring: true,
+ function: function,
+ }
+
+ taskRecurer := func() {
+ go task.function()
+ task.timer.Reset(task.Interval)
+ }
+
+ task.timer = time.AfterFunc(interval, taskRecurer)
+
+ addTask(task)
+
+ return task
+}
+
+func (task *ScheduledTask) Cancel() {
+ task.timer.Stop()
+ removeTaskByName(task.Name)
+}
+
+// Execute runs the task immediately. A recurring task will continue to run at its regular interval afterwards.
+func (task *ScheduledTask) Execute() {
+ task.function()
+ task.timer.Reset(task.Interval)
+}
+
+func (task *ScheduledTask) String() string {
+ return fmt.Sprintf(
+ "%s\nInterval: %s\nRecurring: %t\n",
+ task.Name,
+ task.Interval.String(),
+ task.Recurring,
+ )
+}
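
A minimal sketch of the recurring-task helpers above:

package main

import (
	"fmt"
	"time"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	task := model.CreateRecurringTask("example-cleanup", func() {
		fmt.Println("tick", time.Now())
	}, 50*time.Millisecond)

	time.Sleep(175 * time.Millisecond) // let a few runs fire
	task.Cancel()                      // stop the timer and drop the task from the registry

	fmt.Print(task) // Name, Interval and Recurring via String()
}
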
diff --git a/vendor/github.com/mattermost/mattermost-server/model/search_params.go b/vendor/github.com/mattermost/mattermost-server/model/search_params.go
new file mode 100644
index 00000000..1692b3aa
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/search_params.go
@@ -0,0 +1,171 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "regexp"
+ "strings"
+)
+
+var searchTermPuncStart = regexp.MustCompile(`^[^\pL\d\s#"]+`)
+var searchTermPuncEnd = regexp.MustCompile(`[^\pL\d\s*"]+$`)
+
+type SearchParams struct {
+ Terms string
+ IsHashtag bool
+ InChannels []string
+ FromUsers []string
+ OrTerms bool
+}
+
+func (o *SearchParams) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+var searchFlags = [...]string{"from", "channel", "in"}
+
+func splitWords(text string) []string {
+ words := []string{}
+
+ foundQuote := false
+ location := 0
+ for i, char := range text {
+ if char == '"' {
+ if foundQuote {
+ // Grab the quoted section
+ word := text[location : i+1]
+ words = append(words, word)
+ foundQuote = false
+ location = i + 1
+ } else {
+ words = append(words, strings.Fields(text[location:i])...)
+ foundQuote = true
+ location = i
+ }
+ }
+ }
+
+ words = append(words, strings.Fields(text[location:])...)
+
+ return words
+}
+
+func parseSearchFlags(input []string) ([]string, [][2]string) {
+ words := []string{}
+ flags := [][2]string{}
+
+ skipNextWord := false
+ for i, word := range input {
+ if skipNextWord {
+ skipNextWord = false
+ continue
+ }
+
+ isFlag := false
+
+ if colon := strings.Index(word, ":"); colon != -1 {
+ flag := word[:colon]
+ value := word[colon+1:]
+
+ for _, searchFlag := range searchFlags {
+ // check for case insensitive equality
+ if strings.EqualFold(flag, searchFlag) {
+ if value != "" {
+ flags = append(flags, [2]string{searchFlag, value})
+ isFlag = true
+ } else if i < len(input)-1 {
+ flags = append(flags, [2]string{searchFlag, input[i+1]})
+ skipNextWord = true
+ isFlag = true
+ }
+
+ if isFlag {
+ break
+ }
+ }
+ }
+ }
+
+ if !isFlag {
+ // trim off surrounding punctuation (note that we leave trailing asterisks to allow wildcards)
+ word = searchTermPuncStart.ReplaceAllString(word, "")
+ word = searchTermPuncEnd.ReplaceAllString(word, "")
+
+ // and remove extra pound #s
+ word = hashtagStart.ReplaceAllString(word, "#")
+
+ if len(word) != 0 {
+ words = append(words, word)
+ }
+ }
+ }
+
+ return words, flags
+}
+
+func ParseSearchParams(text string) []*SearchParams {
+ words, flags := parseSearchFlags(splitWords(text))
+
+ hashtagTermList := []string{}
+ plainTermList := []string{}
+
+ for _, word := range words {
+ if validHashtag.MatchString(word) {
+ hashtagTermList = append(hashtagTermList, word)
+ } else {
+ plainTermList = append(plainTermList, word)
+ }
+ }
+
+ hashtagTerms := strings.Join(hashtagTermList, " ")
+ plainTerms := strings.Join(plainTermList, " ")
+
+ inChannels := []string{}
+ fromUsers := []string{}
+
+ for _, flagPair := range flags {
+ flag := flagPair[0]
+ value := flagPair[1]
+
+ if flag == "in" || flag == "channel" {
+ inChannels = append(inChannels, value)
+ } else if flag == "from" {
+ fromUsers = append(fromUsers, value)
+ }
+ }
+
+ paramsList := []*SearchParams{}
+
+ if len(plainTerms) > 0 {
+ paramsList = append(paramsList, &SearchParams{
+ Terms: plainTerms,
+ IsHashtag: false,
+ InChannels: inChannels,
+ FromUsers: fromUsers,
+ })
+ }
+
+ if len(hashtagTerms) > 0 {
+ paramsList = append(paramsList, &SearchParams{
+ Terms: hashtagTerms,
+ IsHashtag: true,
+ InChannels: inChannels,
+ FromUsers: fromUsers,
+ })
+ }
+
+ // special case for when no terms are specified but we still have a filter
+ if len(plainTerms) == 0 && len(hashtagTerms) == 0 && (len(inChannels) != 0 || len(fromUsers) != 0) {
+ paramsList = append(paramsList, &SearchParams{
+ Terms: "",
+ IsHashtag: false,
+ InChannels: inChannels,
+ FromUsers: fromUsers,
+ })
+ }
+
+ return paramsList
+}
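
A sketch of ParseSearchParams above; channel/user filters, a quoted phrase, and a hashtag are all recognized, and the example query is made up:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	query := `in:town-square from:alice "release notes" #launch`
	for _, p := range model.ParseSearchParams(query) {
		fmt.Printf("terms=%q hashtag=%t channels=%v users=%v\n",
			p.Terms, p.IsHashtag, p.InChannels, p.FromUsers)
	}
}
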
diff --git a/vendor/github.com/mattermost/mattermost-server/model/security_bulletin.go b/vendor/github.com/mattermost/mattermost-server/model/security_bulletin.go
new file mode 100644
index 00000000..958b9c9e
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/security_bulletin.go
@@ -0,0 +1,41 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type SecurityBulletin struct {
+ Id string `json:"id"`
+ AppliesToVersion string `json:"applies_to_version"`
+}
+
+type SecurityBulletins []SecurityBulletin
+
+func (me *SecurityBulletin) ToJson() string {
+ b, _ := json.Marshal(me)
+ return string(b)
+}
+
+func SecurityBulletinFromJson(data io.Reader) *SecurityBulletin {
+ var o *SecurityBulletin
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func (me SecurityBulletins) ToJson() string {
+ if b, err := json.Marshal(me); err != nil {
+ return "[]"
+ } else {
+ return string(b)
+ }
+}
+
+func SecurityBulletinsFromJson(data io.Reader) SecurityBulletins {
+ var o SecurityBulletins
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/session.go b/vendor/github.com/mattermost/mattermost-server/model/session.go
new file mode 100644
index 00000000..a407af26
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/session.go
@@ -0,0 +1,137 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "strings"
+)
+
+const (
+ SESSION_COOKIE_TOKEN = "MMAUTHTOKEN"
+ SESSION_COOKIE_USER = "MMUSERID"
+ SESSION_CACHE_SIZE = 35000
+ SESSION_PROP_PLATFORM = "platform"
+ SESSION_PROP_OS = "os"
+ SESSION_PROP_BROWSER = "browser"
+ SESSION_PROP_TYPE = "type"
+ SESSION_PROP_USER_ACCESS_TOKEN_ID = "user_access_token_id"
+ SESSION_TYPE_USER_ACCESS_TOKEN = "UserAccessToken"
+ SESSION_ACTIVITY_TIMEOUT = 1000 * 60 * 5 // 5 minutes
+ SESSION_USER_ACCESS_TOKEN_EXPIRY = 100 * 365 // 100 years
+)
+
+type Session struct {
+ Id string `json:"id"`
+ Token string `json:"token"`
+ CreateAt int64 `json:"create_at"`
+ ExpiresAt int64 `json:"expires_at"`
+ LastActivityAt int64 `json:"last_activity_at"`
+ UserId string `json:"user_id"`
+ DeviceId string `json:"device_id"`
+ Roles string `json:"roles"`
+ IsOAuth bool `json:"is_oauth"`
+ Props StringMap `json:"props"`
+ TeamMembers []*TeamMember `json:"team_members" db:"-"`
+}
+
+func (me *Session) DeepCopy() *Session {
+ copy := *me
+ return &copy
+}
+
+func (me *Session) ToJson() string {
+ b, _ := json.Marshal(me)
+ return string(b)
+}
+
+func SessionFromJson(data io.Reader) *Session {
+ var me *Session
+ json.NewDecoder(data).Decode(&me)
+ return me
+}
+
+func (me *Session) PreSave() {
+ if me.Id == "" {
+ me.Id = NewId()
+ }
+
+ if me.Token == "" {
+ me.Token = NewId()
+ }
+
+ me.CreateAt = GetMillis()
+ me.LastActivityAt = me.CreateAt
+
+ if me.Props == nil {
+ me.Props = make(map[string]string)
+ }
+}
+
+func (me *Session) Sanitize() {
+ me.Token = ""
+}
+
+func (me *Session) IsExpired() bool {
+
+ if me.ExpiresAt <= 0 {
+ return false
+ }
+
+ if GetMillis() > me.ExpiresAt {
+ return true
+ }
+
+ return false
+}
+
+func (me *Session) SetExpireInDays(days int) {
+ if me.CreateAt == 0 {
+ me.ExpiresAt = GetMillis() + (1000 * 60 * 60 * 24 * int64(days))
+ } else {
+ me.ExpiresAt = me.CreateAt + (1000 * 60 * 60 * 24 * int64(days))
+ }
+}
+
+func (me *Session) AddProp(key string, value string) {
+
+ if me.Props == nil {
+ me.Props = make(map[string]string)
+ }
+
+ me.Props[key] = value
+}
+
+func (me *Session) GetTeamByTeamId(teamId string) *TeamMember {
+ for _, team := range me.TeamMembers {
+ if team.TeamId == teamId {
+ return team
+ }
+ }
+
+ return nil
+}
+
+func (me *Session) IsMobileApp() bool {
+ return len(me.DeviceId) > 0
+}
+
+func (me *Session) GetUserRoles() []string {
+ return strings.Fields(me.Roles)
+}
+
+func SessionsToJson(o []*Session) string {
+ if b, err := json.Marshal(o); err != nil {
+ return "[]"
+ } else {
+ return string(b)
+ }
+}
+
+func SessionsFromJson(data io.Reader) []*Session {
+ var o []*Session
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
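
A sketch of the session helpers above:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	s := &model.Session{UserId: model.NewId(), Roles: "system_user system_admin"}
	s.PreSave() // fills Id, Token, CreateAt and LastActivityAt
	s.SetExpireInDays(30)
	s.AddProp(model.SESSION_PROP_PLATFORM, "Linux")

	fmt.Println(s.IsExpired())    // false: expiry is 30 days after CreateAt
	fmt.Println(s.GetUserRoles()) // [system_user system_admin]

	s.Sanitize() // strip the token before returning the session to a client
	fmt.Println(s.Token == "") // true
}
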
diff --git a/vendor/github.com/mattermost/mattermost-server/model/slack_attachment.go b/vendor/github.com/mattermost/mattermost-server/model/slack_attachment.go
new file mode 100644
index 00000000..197d3f0f
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/slack_attachment.go
@@ -0,0 +1,59 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "fmt"
+)
+
+type SlackAttachment struct {
+ Id int64 `json:"id"`
+ Fallback string `json:"fallback"`
+ Color string `json:"color"`
+ Pretext string `json:"pretext"`
+ AuthorName string `json:"author_name"`
+ AuthorLink string `json:"author_link"`
+ AuthorIcon string `json:"author_icon"`
+ Title string `json:"title"`
+ TitleLink string `json:"title_link"`
+ Text string `json:"text"`
+ Fields []*SlackAttachmentField `json:"fields"`
+ ImageURL string `json:"image_url"`
+ ThumbURL string `json:"thumb_url"`
+ Footer string `json:"footer"`
+ FooterIcon string `json:"footer_icon"`
+ Timestamp interface{} `json:"ts"` // This is either a string or an int64
+ Actions []*PostAction `json:"actions,omitempty"`
+}
+
+type SlackAttachmentField struct {
+ Title string `json:"title"`
+ Value interface{} `json:"value"`
+ Short bool `json:"short"`
+}
+
+func StringifySlackFieldValue(a []*SlackAttachment) []*SlackAttachment {
+ var nonNilAttachments []*SlackAttachment
+ for _, attachment := range a {
+ if attachment == nil {
+ continue
+ }
+ nonNilAttachments = append(nonNilAttachments, attachment)
+
+ var nonNilFields []*SlackAttachmentField
+ for _, field := range attachment.Fields {
+ if field == nil {
+ continue
+ }
+ nonNilFields = append(nonNilFields, field)
+
+ if field.Value != nil {
+ // Ensure the value is set to a string if it is set
+ field.Value = fmt.Sprintf("%v", field.Value)
+ }
+ }
+ attachment.Fields = nonNilFields
+ }
+ return nonNilAttachments
+}
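
StringifySlackFieldValue above drops nil entries and coerces field values to strings; a quick sketch with made-up data:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	attachments := []*model.SlackAttachment{
		nil, // nil attachments are dropped
		{
			Title:  "Build status",
			Fields: []*model.SlackAttachmentField{nil, {Title: "Failures", Value: 3}},
		},
	}

	cleaned := model.StringifySlackFieldValue(attachments)
	fmt.Println(len(cleaned))               // 1
	fmt.Println(cleaned[0].Fields[0].Value) // "3" (now a string)
}
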
diff --git a/vendor/github.com/mattermost/mattermost-server/model/status.go b/vendor/github.com/mattermost/mattermost-server/model/status.go
new file mode 100644
index 00000000..cd9e32ed
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/status.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+const (
+ STATUS_OFFLINE = "offline"
+ STATUS_AWAY = "away"
+ STATUS_DND = "dnd"
+ STATUS_ONLINE = "online"
+ STATUS_CACHE_SIZE = SESSION_CACHE_SIZE
+ STATUS_CHANNEL_TIMEOUT = 20000 // 20 seconds
+ STATUS_MIN_UPDATE_TIME = 120000 // 2 minutes
+)
+
+type Status struct {
+ UserId string `json:"user_id"`
+ Status string `json:"status"`
+ Manual bool `json:"manual"`
+ LastActivityAt int64 `json:"last_activity_at"`
+ ActiveChannel string `json:"-" db:"-"`
+}
+
+func (o *Status) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func StatusFromJson(data io.Reader) *Status {
+ var o *Status
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func StatusListToJson(u []*Status) string {
+ b, _ := json.Marshal(u)
+ return string(b)
+}
+
+func StatusListFromJson(data io.Reader) []*Status {
+ var statuses []*Status
+ json.NewDecoder(data).Decode(&statuses)
+ return statuses
+}
+
+func StatusMapToInterfaceMap(statusMap map[string]*Status) map[string]interface{} {
+ interfaceMap := map[string]interface{}{}
+ for _, s := range statusMap {
+		// Omitted statuses mean offline
+ if s.Status != STATUS_OFFLINE {
+ interfaceMap[s.UserId] = s.Status
+ }
+ }
+ return interfaceMap
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/suggest_command.go b/vendor/github.com/mattermost/mattermost-server/model/suggest_command.go
new file mode 100644
index 00000000..44f46bf7
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/suggest_command.go
@@ -0,0 +1,25 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type SuggestCommand struct {
+ Suggestion string `json:"suggestion"`
+ Description string `json:"description"`
+}
+
+func (o *SuggestCommand) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func SuggestCommandFromJson(data io.Reader) *SuggestCommand {
+ var o *SuggestCommand
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/switch_request.go b/vendor/github.com/mattermost/mattermost-server/model/switch_request.go
new file mode 100644
index 00000000..e153c92f
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/switch_request.go
@@ -0,0 +1,53 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type SwitchRequest struct {
+ CurrentService string `json:"current_service"`
+ NewService string `json:"new_service"`
+ Email string `json:"email"`
+ Password string `json:"password"`
+ NewPassword string `json:"new_password"`
+ MfaCode string `json:"mfa_code"`
+ LdapId string `json:"ldap_id"`
+}
+
+func (o *SwitchRequest) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func SwitchRequestFromJson(data io.Reader) *SwitchRequest {
+ var o *SwitchRequest
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func (o *SwitchRequest) EmailToOAuth() bool {
+ return o.CurrentService == USER_AUTH_SERVICE_EMAIL &&
+ (o.NewService == USER_AUTH_SERVICE_SAML ||
+ o.NewService == USER_AUTH_SERVICE_GITLAB ||
+ o.NewService == SERVICE_GOOGLE ||
+ o.NewService == SERVICE_OFFICE365)
+}
+
+func (o *SwitchRequest) OAuthToEmail() bool {
+ return (o.CurrentService == USER_AUTH_SERVICE_SAML ||
+ o.CurrentService == USER_AUTH_SERVICE_GITLAB ||
+ o.CurrentService == SERVICE_GOOGLE ||
+ o.CurrentService == SERVICE_OFFICE365) && o.NewService == USER_AUTH_SERVICE_EMAIL
+}
+
+func (o *SwitchRequest) EmailToLdap() bool {
+ return o.CurrentService == USER_AUTH_SERVICE_EMAIL && o.NewService == USER_AUTH_SERVICE_LDAP
+}
+
+func (o *SwitchRequest) LdapToEmail() bool {
+ return o.CurrentService == USER_AUTH_SERVICE_LDAP && o.NewService == USER_AUTH_SERVICE_EMAIL
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/system.go b/vendor/github.com/mattermost/mattermost-server/model/system.go
new file mode 100644
index 00000000..2a636b14
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/system.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "math/big"
+)
+
+const (
+ SYSTEM_DIAGNOSTIC_ID = "DiagnosticId"
+ SYSTEM_RAN_UNIT_TESTS = "RanUnitTests"
+ SYSTEM_LAST_SECURITY_TIME = "LastSecurityTime"
+ SYSTEM_ACTIVE_LICENSE_ID = "ActiveLicenseId"
+ SYSTEM_LAST_COMPLIANCE_TIME = "LastComplianceTime"
+ SYSTEM_ASYMMETRIC_SIGNING_KEY = "AsymmetricSigningKey"
+)
+
+type System struct {
+ Name string `json:"name"`
+ Value string `json:"value"`
+}
+
+func (o *System) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func SystemFromJson(data io.Reader) *System {
+ var o *System
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+type SystemAsymmetricSigningKey struct {
+ ECDSAKey *SystemECDSAKey `json:"ecdsa_key,omitempty"`
+}
+
+type SystemECDSAKey struct {
+ Curve string `json:"curve"`
+ X *big.Int `json:"x"`
+ Y *big.Int `json:"y"`
+ D *big.Int `json:"d,omitempty"`
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/team.go b/vendor/github.com/mattermost/mattermost-server/model/team.go
new file mode 100644
index 00000000..5b6eb1fa
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/team.go
@@ -0,0 +1,294 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+ TEAM_OPEN = "O"
+ TEAM_INVITE = "I"
+ TEAM_ALLOWED_DOMAINS_MAX_LENGTH = 500
+ TEAM_COMPANY_NAME_MAX_LENGTH = 64
+ TEAM_DESCRIPTION_MAX_LENGTH = 255
+ TEAM_DISPLAY_NAME_MAX_RUNES = 64
+ TEAM_EMAIL_MAX_LENGTH = 128
+ TEAM_NAME_MAX_LENGTH = 64
+ TEAM_NAME_MIN_LENGTH = 2
+)
+
+type Team struct {
+ Id string `json:"id"`
+ CreateAt int64 `json:"create_at"`
+ UpdateAt int64 `json:"update_at"`
+ DeleteAt int64 `json:"delete_at"`
+ DisplayName string `json:"display_name"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Email string `json:"email"`
+ Type string `json:"type"`
+ CompanyName string `json:"company_name"`
+ AllowedDomains string `json:"allowed_domains"`
+ InviteId string `json:"invite_id"`
+ AllowOpenInvite bool `json:"allow_open_invite"`
+}
+
+type TeamPatch struct {
+ DisplayName *string `json:"display_name"`
+ Description *string `json:"description"`
+ CompanyName *string `json:"company_name"`
+ InviteId *string `json:"invite_id"`
+ AllowOpenInvite *bool `json:"allow_open_invite"`
+}
+
+type Invites struct {
+ Invites []map[string]string `json:"invites"`
+}
+
+func InvitesFromJson(data io.Reader) *Invites {
+ var o *Invites
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func (o *Invites) ToEmailList() []string {
+ emailList := make([]string, 0, len(o.Invites))
+ for _, invite := range o.Invites {
+ emailList = append(emailList, invite["email"])
+ }
+ return emailList
+}
+
+func (o *Invites) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func (o *Team) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func TeamFromJson(data io.Reader) *Team {
+ var o *Team
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func TeamMapToJson(u map[string]*Team) string {
+ b, _ := json.Marshal(u)
+ return string(b)
+}
+
+func TeamMapFromJson(data io.Reader) map[string]*Team {
+ var teams map[string]*Team
+ json.NewDecoder(data).Decode(&teams)
+ return teams
+}
+
+func TeamListToJson(t []*Team) string {
+ b, _ := json.Marshal(t)
+ return string(b)
+}
+
+func TeamListFromJson(data io.Reader) []*Team {
+ var teams []*Team
+ json.NewDecoder(data).Decode(&teams)
+ return teams
+}
+
+func (o *Team) Etag() string {
+ return Etag(o.Id, o.UpdateAt)
+}
+
+func (o *Team) IsValid() *AppError {
+
+ if len(o.Id) != 26 {
+ return NewAppError("Team.IsValid", "model.team.is_valid.id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if o.CreateAt == 0 {
+ return NewAppError("Team.IsValid", "model.team.is_valid.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if o.UpdateAt == 0 {
+ return NewAppError("Team.IsValid", "model.team.is_valid.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if len(o.Email) > TEAM_EMAIL_MAX_LENGTH {
+ return NewAppError("Team.IsValid", "model.team.is_valid.email.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if len(o.Email) > 0 && !IsValidEmail(o.Email) {
+ return NewAppError("Team.IsValid", "model.team.is_valid.email.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if utf8.RuneCountInString(o.DisplayName) == 0 || utf8.RuneCountInString(o.DisplayName) > TEAM_DISPLAY_NAME_MAX_RUNES {
+ return NewAppError("Team.IsValid", "model.team.is_valid.name.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if len(o.Name) > TEAM_NAME_MAX_LENGTH {
+ return NewAppError("Team.IsValid", "model.team.is_valid.url.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if len(o.Description) > TEAM_DESCRIPTION_MAX_LENGTH {
+ return NewAppError("Team.IsValid", "model.team.is_valid.description.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if IsReservedTeamName(o.Name) {
+ return NewAppError("Team.IsValid", "model.team.is_valid.reserved.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if !IsValidTeamName(o.Name) {
+ return NewAppError("Team.IsValid", "model.team.is_valid.characters.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if !(o.Type == TEAM_OPEN || o.Type == TEAM_INVITE) {
+ return NewAppError("Team.IsValid", "model.team.is_valid.type.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if len(o.CompanyName) > TEAM_COMPANY_NAME_MAX_LENGTH {
+ return NewAppError("Team.IsValid", "model.team.is_valid.company.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ if len(o.AllowedDomains) > TEAM_ALLOWED_DOMAINS_MAX_LENGTH {
+ return NewAppError("Team.IsValid", "model.team.is_valid.domains.app_error", nil, "id="+o.Id, http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (o *Team) PreSave() {
+ if o.Id == "" {
+ o.Id = NewId()
+ }
+
+ o.CreateAt = GetMillis()
+ o.UpdateAt = o.CreateAt
+
+ if len(o.InviteId) == 0 {
+ o.InviteId = NewId()
+ }
+}
+
+func (o *Team) PreUpdate() {
+ o.UpdateAt = GetMillis()
+}
+
+func IsReservedTeamName(s string) bool {
+ s = strings.ToLower(s)
+
+ for _, value := range reservedName {
+ if strings.Index(s, value) == 0 {
+ return true
+ }
+ }
+
+ return false
+}
+
+func IsValidTeamName(s string) bool {
+
+ if !IsValidAlphaNum(s) {
+ return false
+ }
+
+ if len(s) < TEAM_NAME_MIN_LENGTH {
+ return false
+ }
+
+ return true
+}
+
+var validTeamNameCharacter = regexp.MustCompile(`^[a-z0-9-]$`)
+
+func CleanTeamName(s string) string {
+ s = strings.ToLower(strings.Replace(s, " ", "-", -1))
+
+ for _, value := range reservedName {
+ if strings.Index(s, value) == 0 {
+ s = strings.Replace(s, value, "", -1)
+ }
+ }
+
+ s = strings.TrimSpace(s)
+
+ for _, c := range s {
+ char := fmt.Sprintf("%c", c)
+ if !validTeamNameCharacter.MatchString(char) {
+ s = strings.Replace(s, char, "", -1)
+ }
+ }
+
+ s = strings.Trim(s, "-")
+
+ if !IsValidTeamName(s) {
+ s = NewId()
+ }
+
+ return s
+}
+
+func (o *Team) Sanitize() {
+ o.Email = ""
+ o.AllowedDomains = ""
+}
+
+func (o *Team) SanitizeForNotLoggedIn() {
+ o.Email = ""
+ o.AllowedDomains = ""
+ o.CompanyName = ""
+ if !o.AllowOpenInvite {
+ o.InviteId = ""
+ }
+}
+
+func (t *Team) Patch(patch *TeamPatch) {
+ if patch.DisplayName != nil {
+ t.DisplayName = *patch.DisplayName
+ }
+
+ if patch.Description != nil {
+ t.Description = *patch.Description
+ }
+
+ if patch.CompanyName != nil {
+ t.CompanyName = *patch.CompanyName
+ }
+
+ if patch.InviteId != nil {
+ t.InviteId = *patch.InviteId
+ }
+
+ if patch.AllowOpenInvite != nil {
+ t.AllowOpenInvite = *patch.AllowOpenInvite
+ }
+}
+
+func (t *TeamPatch) ToJson() string {
+ b, err := json.Marshal(t)
+ if err != nil {
+ return ""
+ }
+
+ return string(b)
+}
+
+func TeamPatchFromJson(data io.Reader) *TeamPatch {
+ decoder := json.NewDecoder(data)
+ var team TeamPatch
+ err := decoder.Decode(&team)
+ if err != nil {
+ return nil
+ }
+
+ return &team
+}
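
As an illustrative sketch of how the Team helpers above compose (hypothetical caller code, not part of the vendored file; the team values are placeholders), a typical create path cleans the URL name, runs PreSave, and then validates:

```go
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	t := &model.Team{
		DisplayName: "Engineering",
		Name:        model.CleanTeamName("Engineering Team"), // "engineering-team"
		Email:       "owner@example.com",
		Type:        model.TEAM_OPEN,
	}
	t.PreSave() // fills Id, CreateAt/UpdateAt and InviteId

	if err := t.IsValid(); err != nil {
		fmt.Println("invalid team:", err.Error())
		return
	}
	fmt.Println(t.ToJson())
}
```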
diff --git a/vendor/github.com/mattermost/mattermost-server/model/team_member.go b/vendor/github.com/mattermost/mattermost-server/model/team_member.go
new file mode 100644
index 00000000..2fcd1e15
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/team_member.go
@@ -0,0 +1,94 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "strings"
+)
+
+type TeamMember struct {
+ TeamId string `json:"team_id"`
+ UserId string `json:"user_id"`
+ Roles string `json:"roles"`
+ DeleteAt int64 `json:"delete_at"`
+}
+
+type TeamUnread struct {
+ TeamId string `json:"team_id"`
+ MsgCount int64 `json:"msg_count"`
+ MentionCount int64 `json:"mention_count"`
+}
+
+func (o *TeamMember) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func (o *TeamUnread) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func TeamMemberFromJson(data io.Reader) *TeamMember {
+ var o *TeamMember
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func TeamUnreadFromJson(data io.Reader) *TeamUnread {
+ var o *TeamUnread
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func TeamMembersToJson(o []*TeamMember) string {
+ if b, err := json.Marshal(o); err != nil {
+ return "[]"
+ } else {
+ return string(b)
+ }
+}
+
+func TeamMembersFromJson(data io.Reader) []*TeamMember {
+ var o []*TeamMember
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func TeamsUnreadToJson(o []*TeamUnread) string {
+ if b, err := json.Marshal(o); err != nil {
+ return "[]"
+ } else {
+ return string(b)
+ }
+}
+
+func TeamsUnreadFromJson(data io.Reader) []*TeamUnread {
+ var o []*TeamUnread
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func (o *TeamMember) IsValid() *AppError {
+
+ if len(o.TeamId) != 26 {
+ return NewAppError("TeamMember.IsValid", "model.team_member.is_valid.team_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(o.UserId) != 26 {
+ return NewAppError("TeamMember.IsValid", "model.team_member.is_valid.user_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (o *TeamMember) PreUpdate() {
+}
+
+func (o *TeamMember) GetRoles() []string {
+ return strings.Fields(o.Roles)
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/team_search.go b/vendor/github.com/mattermost/mattermost-server/model/team_search.go
new file mode 100644
index 00000000..e0676022
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/team_search.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type TeamSearch struct {
+ Term string `json:"term"`
+}
+
+// ToJson converts a TeamSearch to a json string
+func (c *TeamSearch) ToJson() string {
+ b, err := json.Marshal(c)
+ if err != nil {
+ return ""
+ }
+
+ return string(b)
+}
+
+// TeamSearchFromJson decodes the input and returns a TeamSearch
+func TeamSearchFromJson(data io.Reader) *TeamSearch {
+ decoder := json.NewDecoder(data)
+ var cs TeamSearch
+ err := decoder.Decode(&cs)
+ if err == nil {
+ return &cs
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/team_stats.go b/vendor/github.com/mattermost/mattermost-server/model/team_stats.go
new file mode 100644
index 00000000..0d688b80
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/team_stats.go
@@ -0,0 +1,26 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type TeamStats struct {
+ TeamId string `json:"team_id"`
+ TotalMemberCount int64 `json:"total_member_count"`
+ ActiveMemberCount int64 `json:"active_member_count"`
+}
+
+func (o *TeamStats) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func TeamStatsFromJson(data io.Reader) *TeamStats {
+ var o *TeamStats
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/token.go b/vendor/github.com/mattermost/mattermost-server/model/token.go
new file mode 100644
index 00000000..a4d10c7f
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/token.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import "net/http"
+
+const (
+ TOKEN_SIZE = 64
+ MAX_TOKEN_EXIPRY_TIME = 1000 * 60 * 60 * 24 // 24 hours
+ TOKEN_TYPE_OAUTH = "oauth"
+)
+
+type Token struct {
+ Token string
+ CreateAt int64
+ Type string
+ Extra string
+}
+
+func NewToken(tokentype, extra string) *Token {
+ return &Token{
+ Token: NewRandomString(TOKEN_SIZE),
+ CreateAt: GetMillis(),
+ Type: tokentype,
+ Extra: extra,
+ }
+}
+
+func (t *Token) IsValid() *AppError {
+ if len(t.Token) != TOKEN_SIZE {
+ return NewAppError("Token.IsValid", "model.token.is_valid.size", nil, "", http.StatusInternalServerError)
+ }
+
+ if t.CreateAt == 0 {
+ return NewAppError("Token.IsValid", "model.token.is_valid.expiry", nil, "", http.StatusInternalServerError)
+ }
+
+ return nil
+}
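
A minimal, hypothetical sketch of the Token API above (not part of the vendored file; the token type and extra payload are placeholders) mints a token and validates it before persisting:

```go
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	// NewToken fills Token (64 random characters), CreateAt, Type and Extra.
	token := model.NewToken(model.TOKEN_TYPE_OAUTH, `{"user_id":"placeholder"}`)

	if appErr := token.IsValid(); appErr != nil {
		fmt.Println("invalid token:", appErr.Error())
		return
	}
	fmt.Println(token.Type, len(token.Token)) // "oauth" 64
}
```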
diff --git a/vendor/github.com/mattermost/mattermost-server/model/user.go b/vendor/github.com/mattermost/mattermost-server/model/user.go
new file mode 100644
index 00000000..1e1d49f7
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/user.go
@@ -0,0 +1,616 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/crypto/bcrypt"
+)
+
+const (
+ ME = "me"
+ USER_NOTIFY_ALL = "all"
+ USER_NOTIFY_MENTION = "mention"
+ USER_NOTIFY_NONE = "none"
+ DESKTOP_NOTIFY_PROP = "desktop"
+ DESKTOP_SOUND_NOTIFY_PROP = "desktop_sound"
+ DESKTOP_DURATION_NOTIFY_PROP = "desktop_duration"
+ MARK_UNREAD_NOTIFY_PROP = "mark_unread"
+ PUSH_NOTIFY_PROP = "push"
+ PUSH_STATUS_NOTIFY_PROP = "push_status"
+ EMAIL_NOTIFY_PROP = "email"
+ CHANNEL_MENTIONS_NOTIFY_PROP = "channel"
+ COMMENTS_NOTIFY_PROP = "comments"
+ MENTION_KEYS_NOTIFY_PROP = "mention_keys"
+ COMMENTS_NOTIFY_NEVER = "never"
+ COMMENTS_NOTIFY_ROOT = "root"
+ COMMENTS_NOTIFY_ANY = "any"
+
+ DEFAULT_LOCALE = "en"
+ USER_AUTH_SERVICE_EMAIL = "email"
+
+ USER_EMAIL_MAX_LENGTH = 128
+ USER_NICKNAME_MAX_RUNES = 64
+ USER_POSITION_MAX_RUNES = 128
+ USER_FIRST_NAME_MAX_RUNES = 64
+ USER_LAST_NAME_MAX_RUNES = 64
+ USER_AUTH_DATA_MAX_LENGTH = 128
+ USER_NAME_MAX_LENGTH = 64
+ USER_NAME_MIN_LENGTH = 1
+ USER_PASSWORD_MAX_LENGTH = 72
+)
+
+type User struct {
+ Id string `json:"id"`
+ CreateAt int64 `json:"create_at,omitempty"`
+ UpdateAt int64 `json:"update_at,omitempty"`
+ DeleteAt int64 `json:"delete_at"`
+ Username string `json:"username"`
+ Password string `json:"password,omitempty"`
+ AuthData *string `json:"auth_data,omitempty"`
+ AuthService string `json:"auth_service"`
+ Email string `json:"email"`
+ EmailVerified bool `json:"email_verified,omitempty"`
+ Nickname string `json:"nickname"`
+ FirstName string `json:"first_name"`
+ LastName string `json:"last_name"`
+ Position string `json:"position"`
+ Roles string `json:"roles"`
+ AllowMarketing bool `json:"allow_marketing,omitempty"`
+ Props StringMap `json:"props,omitempty"`
+ NotifyProps StringMap `json:"notify_props,omitempty"`
+ LastPasswordUpdate int64 `json:"last_password_update,omitempty"`
+ LastPictureUpdate int64 `json:"last_picture_update,omitempty"`
+ FailedAttempts int `json:"failed_attempts,omitempty"`
+ Locale string `json:"locale"`
+ MfaActive bool `json:"mfa_active,omitempty"`
+ MfaSecret string `json:"mfa_secret,omitempty"`
+ LastActivityAt int64 `db:"-" json:"last_activity_at,omitempty"`
+}
+
+type UserPatch struct {
+ Username *string `json:"username"`
+ Nickname *string `json:"nickname"`
+ FirstName *string `json:"first_name"`
+ LastName *string `json:"last_name"`
+ Position *string `json:"position"`
+ Email *string `json:"email"`
+ Props StringMap `json:"props,omitempty"`
+ NotifyProps StringMap `json:"notify_props,omitempty"`
+ Locale *string `json:"locale"`
+}
+
+type UserAuth struct {
+ Password string `json:"password,omitempty"`
+ AuthData *string `json:"auth_data,omitempty"`
+ AuthService string `json:"auth_service,omitempty"`
+}
+
+// IsValid validates the user and returns an error if it isn't configured
+// correctly.
+func (u *User) IsValid() *AppError {
+
+ if len(u.Id) != 26 {
+ return InvalidUserError("id", "")
+ }
+
+ if u.CreateAt == 0 {
+ return InvalidUserError("create_at", u.Id)
+ }
+
+ if u.UpdateAt == 0 {
+ return InvalidUserError("update_at", u.Id)
+ }
+
+ if !IsValidUsername(u.Username) {
+ return InvalidUserError("username", u.Id)
+ }
+
+ if len(u.Email) > USER_EMAIL_MAX_LENGTH || len(u.Email) == 0 {
+ return InvalidUserError("email", u.Id)
+ }
+
+ if utf8.RuneCountInString(u.Nickname) > USER_NICKNAME_MAX_RUNES {
+ return InvalidUserError("nickname", u.Id)
+ }
+
+ if utf8.RuneCountInString(u.Position) > USER_POSITION_MAX_RUNES {
+ return InvalidUserError("position", u.Id)
+ }
+
+ if utf8.RuneCountInString(u.FirstName) > USER_FIRST_NAME_MAX_RUNES {
+ return InvalidUserError("first_name", u.Id)
+ }
+
+ if utf8.RuneCountInString(u.LastName) > USER_LAST_NAME_MAX_RUNES {
+ return InvalidUserError("last_name", u.Id)
+ }
+
+ if u.AuthData != nil && len(*u.AuthData) > USER_AUTH_DATA_MAX_LENGTH {
+ return InvalidUserError("auth_data", u.Id)
+ }
+
+ if u.AuthData != nil && len(*u.AuthData) > 0 && len(u.AuthService) == 0 {
+ return InvalidUserError("auth_data_type", u.Id)
+ }
+
+ if len(u.Password) > 0 && u.AuthData != nil && len(*u.AuthData) > 0 {
+ return InvalidUserError("auth_data_pwd", u.Id)
+ }
+
+ if len(u.Password) > USER_PASSWORD_MAX_LENGTH {
+ return InvalidUserError("password_limit", u.Id)
+ }
+
+ return nil
+}
+
+func InvalidUserError(fieldName string, userId string) *AppError {
+ id := fmt.Sprintf("model.user.is_valid.%s.app_error", fieldName)
+ details := ""
+ if userId != "" {
+ details = "user_id=" + userId
+ }
+ return NewAppError("User.IsValid", id, nil, details, http.StatusBadRequest)
+}
+
+func NormalizeUsername(username string) string {
+ return strings.ToLower(username)
+}
+
+func NormalizeEmail(email string) string {
+ return strings.ToLower(email)
+}
+
+// PreSave will set the Id and Username if missing, fill in the CreateAt
+// and UpdateAt times, and hash the password. It should be run before
+// saving the user to the db.
+func (u *User) PreSave() {
+ if u.Id == "" {
+ u.Id = NewId()
+ }
+
+ if u.Username == "" {
+ u.Username = NewId()
+ }
+
+ if u.AuthData != nil && *u.AuthData == "" {
+ u.AuthData = nil
+ }
+
+ u.Username = NormalizeUsername(u.Username)
+ u.Email = NormalizeEmail(u.Email)
+
+ u.CreateAt = GetMillis()
+ u.UpdateAt = u.CreateAt
+
+ u.LastPasswordUpdate = u.CreateAt
+
+ u.MfaActive = false
+
+ if u.Locale == "" {
+ u.Locale = DEFAULT_LOCALE
+ }
+
+ if u.Props == nil {
+ u.Props = make(map[string]string)
+ }
+
+ if u.NotifyProps == nil || len(u.NotifyProps) == 0 {
+ u.SetDefaultNotifications()
+ }
+
+ if len(u.Password) > 0 {
+ u.Password = HashPassword(u.Password)
+ }
+}
+
+// PreUpdate should be run before updating the user in the db.
+func (u *User) PreUpdate() {
+ u.Username = NormalizeUsername(u.Username)
+ u.Email = NormalizeEmail(u.Email)
+ u.UpdateAt = GetMillis()
+
+ if u.AuthData != nil && *u.AuthData == "" {
+ u.AuthData = nil
+ }
+
+ if u.NotifyProps == nil || len(u.NotifyProps) == 0 {
+ u.SetDefaultNotifications()
+ } else if _, ok := u.NotifyProps["mention_keys"]; ok {
+ // Remove any blank mention keys
+ splitKeys := strings.Split(u.NotifyProps["mention_keys"], ",")
+ goodKeys := []string{}
+ for _, key := range splitKeys {
+ if len(key) > 0 {
+ goodKeys = append(goodKeys, strings.ToLower(key))
+ }
+ }
+ u.NotifyProps["mention_keys"] = strings.Join(goodKeys, ",")
+ }
+}
+
+func (u *User) SetDefaultNotifications() {
+ u.NotifyProps = make(map[string]string)
+ u.NotifyProps["email"] = "true"
+ u.NotifyProps["push"] = USER_NOTIFY_MENTION
+ u.NotifyProps["desktop"] = USER_NOTIFY_MENTION
+ u.NotifyProps["desktop_sound"] = "true"
+ u.NotifyProps["mention_keys"] = u.Username + ",@" + u.Username
+ u.NotifyProps["channel"] = "true"
+ u.NotifyProps["push_status"] = STATUS_AWAY
+ u.NotifyProps["comments"] = "never"
+ u.NotifyProps["first_name"] = "false"
+}
+
+func (user *User) UpdateMentionKeysFromUsername(oldUsername string) {
+ nonUsernameKeys := []string{}
+ splitKeys := strings.Split(user.NotifyProps["mention_keys"], ",")
+ for _, key := range splitKeys {
+ if key != oldUsername && key != "@"+oldUsername {
+ nonUsernameKeys = append(nonUsernameKeys, key)
+ }
+ }
+
+ user.NotifyProps["mention_keys"] = user.Username + ",@" + user.Username
+ if len(nonUsernameKeys) > 0 {
+ user.NotifyProps["mention_keys"] += "," + strings.Join(nonUsernameKeys, ",")
+ }
+}
+
+func (u *User) Patch(patch *UserPatch) {
+ if patch.Username != nil {
+ u.Username = *patch.Username
+ }
+
+ if patch.Nickname != nil {
+ u.Nickname = *patch.Nickname
+ }
+
+ if patch.FirstName != nil {
+ u.FirstName = *patch.FirstName
+ }
+
+ if patch.LastName != nil {
+ u.LastName = *patch.LastName
+ }
+
+ if patch.Position != nil {
+ u.Position = *patch.Position
+ }
+
+ if patch.Email != nil {
+ u.Email = *patch.Email
+ }
+
+ if patch.Props != nil {
+ u.Props = patch.Props
+ }
+
+ if patch.NotifyProps != nil {
+ u.NotifyProps = patch.NotifyProps
+ }
+
+ if patch.Locale != nil {
+ u.Locale = *patch.Locale
+ }
+}
+
+// ToJson converts a User to a json string
+func (u *User) ToJson() string {
+ b, _ := json.Marshal(u)
+ return string(b)
+}
+
+func (u *UserPatch) ToJson() string {
+ b, _ := json.Marshal(u)
+ return string(b)
+}
+
+func (u *UserAuth) ToJson() string {
+ b, _ := json.Marshal(u)
+ return string(b)
+}
+
+// Generate a valid strong etag so the browser can cache the results
+func (u *User) Etag(showFullName, showEmail bool) string {
+ return Etag(u.Id, u.UpdateAt, showFullName, showEmail)
+}
+
+// Remove any private data from the user object
+func (u *User) Sanitize(options map[string]bool) {
+ u.Password = ""
+ u.AuthData = NewString("")
+ u.MfaSecret = ""
+
+ if len(options) != 0 && !options["email"] {
+ u.Email = ""
+ }
+ if len(options) != 0 && !options["fullname"] {
+ u.FirstName = ""
+ u.LastName = ""
+ }
+ if len(options) != 0 && !options["passwordupdate"] {
+ u.LastPasswordUpdate = 0
+ }
+ if len(options) != 0 && !options["authservice"] {
+ u.AuthService = ""
+ }
+}
+
+func (u *User) ClearNonProfileFields() {
+ u.Password = ""
+ u.AuthData = NewString("")
+ u.MfaSecret = ""
+ u.EmailVerified = false
+ u.AllowMarketing = false
+ u.NotifyProps = StringMap{}
+ u.LastPasswordUpdate = 0
+ u.FailedAttempts = 0
+}
+
+func (u *User) SanitizeProfile(options map[string]bool) {
+ u.ClearNonProfileFields()
+
+ u.Sanitize(options)
+}
+
+func (u *User) MakeNonNil() {
+ if u.Props == nil {
+ u.Props = make(map[string]string)
+ }
+
+ if u.NotifyProps == nil {
+ u.NotifyProps = make(map[string]string)
+ }
+}
+
+func (u *User) AddProp(key string, value string) {
+ u.MakeNonNil()
+
+ u.Props[key] = value
+}
+
+func (u *User) AddNotifyProp(key string, value string) {
+ u.MakeNonNil()
+
+ u.NotifyProps[key] = value
+}
+
+func (u *User) GetFullName() string {
+ if u.FirstName != "" && u.LastName != "" {
+ return u.FirstName + " " + u.LastName
+ } else if u.FirstName != "" {
+ return u.FirstName
+ } else if u.LastName != "" {
+ return u.LastName
+ } else {
+ return ""
+ }
+}
+
+func (u *User) GetDisplayName(nameFormat string) string {
+ displayName := u.Username
+
+ if nameFormat == SHOW_NICKNAME_FULLNAME {
+ if u.Nickname != "" {
+ displayName = u.Nickname
+ } else if fullName := u.GetFullName(); fullName != "" {
+ displayName = fullName
+ }
+ } else if nameFormat == SHOW_FULLNAME {
+ if fullName := u.GetFullName(); fullName != "" {
+ displayName = fullName
+ }
+ }
+
+ return displayName
+}
+
+func (u *User) GetRoles() []string {
+ return strings.Fields(u.Roles)
+}
+
+func (u *User) GetRawRoles() string {
+ return u.Roles
+}
+
+func IsValidUserRoles(userRoles string) bool {
+
+ roles := strings.Fields(userRoles)
+
+ for _, r := range roles {
+ if !isValidRole(r) {
+ return false
+ }
+ }
+
+ // Exclude just the system_admin role explicitly to prevent mistakes
+ if len(roles) == 1 && roles[0] == "system_admin" {
+ return false
+ }
+
+ return true
+}
+
+func isValidRole(roleId string) bool {
+ _, ok := DefaultRoles[roleId]
+ return ok
+}
+
+// Make sure you actually want to use this function. In context.go there are functions to check permissions.
+// This function should not be used to check permissions.
+func (u *User) IsInRole(inRole string) bool {
+ return IsInRole(u.Roles, inRole)
+}
+
+// Make sure you actually want to use this function. In context.go there are functions to check permissions.
+// This function should not be used to check permissions.
+func IsInRole(userRoles string, inRole string) bool {
+ roles := strings.Split(userRoles, " ")
+
+ for _, r := range roles {
+ if r == inRole {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (u *User) IsSSOUser() bool {
+ return u.AuthService != "" && u.AuthService != USER_AUTH_SERVICE_EMAIL
+}
+
+func (u *User) IsOAuthUser() bool {
+ return u.AuthService == USER_AUTH_SERVICE_GITLAB
+}
+
+func (u *User) IsLDAPUser() bool {
+ return u.AuthService == USER_AUTH_SERVICE_LDAP
+}
+
+func (u *User) IsSAMLUser() bool {
+ return u.AuthService == USER_AUTH_SERVICE_SAML
+}
+
+// UserFromJson will decode the input and return a User
+func UserFromJson(data io.Reader) *User {
+ var user *User
+ json.NewDecoder(data).Decode(&user)
+ return user
+}
+
+func UserPatchFromJson(data io.Reader) *UserPatch {
+ var user *UserPatch
+ json.NewDecoder(data).Decode(&user)
+ return user
+}
+
+func UserAuthFromJson(data io.Reader) *UserAuth {
+ var user *UserAuth
+ json.NewDecoder(data).Decode(&user)
+ return user
+}
+
+func UserMapToJson(u map[string]*User) string {
+ b, _ := json.Marshal(u)
+ return string(b)
+}
+
+func UserMapFromJson(data io.Reader) map[string]*User {
+ var users map[string]*User
+ json.NewDecoder(data).Decode(&users)
+ return users
+}
+
+func UserListToJson(u []*User) string {
+ b, _ := json.Marshal(u)
+ return string(b)
+}
+
+func UserListFromJson(data io.Reader) []*User {
+ var users []*User
+ json.NewDecoder(data).Decode(&users)
+ return users
+}
+
+// HashPassword generates a hash of the password using bcrypt.GenerateFromPassword
+func HashPassword(password string) string {
+ hash, err := bcrypt.GenerateFromPassword([]byte(password), 10)
+ if err != nil {
+ panic(err)
+ }
+
+ return string(hash)
+}
+
+// ComparePassword compares a plaintext password against its bcrypt hash
+func ComparePassword(hash string, password string) bool {
+
+ if len(password) == 0 || len(hash) == 0 {
+ return false
+ }
+
+ err := bcrypt.CompareHashAndPassword([]byte(hash), []byte(password))
+ return err == nil
+}
+
+var validUsernameChars = regexp.MustCompile(`^[a-z0-9\.\-_]+$`)
+
+var restrictedUsernames = []string{
+ "all",
+ "channel",
+ "matterbot",
+}
+
+func IsValidUsername(s string) bool {
+ if len(s) < USER_NAME_MIN_LENGTH || len(s) > USER_NAME_MAX_LENGTH {
+ return false
+ }
+
+ if !validUsernameChars.MatchString(s) {
+ return false
+ }
+
+ for _, restrictedUsername := range restrictedUsernames {
+ if s == restrictedUsername {
+ return false
+ }
+ }
+
+ return true
+}
+
+func CleanUsername(s string) string {
+ s = NormalizeUsername(strings.Replace(s, " ", "-", -1))
+
+ for _, value := range reservedName {
+ if s == value {
+ s = strings.Replace(s, value, "", -1)
+ }
+ }
+
+ s = strings.TrimSpace(s)
+
+ for _, c := range s {
+ char := fmt.Sprintf("%c", c)
+ if !validUsernameChars.MatchString(char) {
+ s = strings.Replace(s, char, "-", -1)
+ }
+ }
+
+ s = strings.Trim(s, "-")
+
+ if !IsValidUsername(s) {
+ s = "a" + NewId()
+ }
+
+ return s
+}
+
+func IsValidUserNotifyLevel(notifyLevel string) bool {
+ return notifyLevel == CHANNEL_NOTIFY_ALL ||
+ notifyLevel == CHANNEL_NOTIFY_MENTION ||
+ notifyLevel == CHANNEL_NOTIFY_NONE
+}
+
+func IsValidPushStatusNotifyLevel(notifyLevel string) bool {
+ return notifyLevel == STATUS_ONLINE ||
+ notifyLevel == STATUS_AWAY ||
+ notifyLevel == STATUS_OFFLINE
+}
+
+func IsValidCommentsNotifyLevel(notifyLevel string) bool {
+ return notifyLevel == COMMENTS_NOTIFY_ANY ||
+ notifyLevel == COMMENTS_NOTIFY_ROOT ||
+ notifyLevel == COMMENTS_NOTIFY_NEVER
+}
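
To show how the User lifecycle helpers above fit together, here is a small hypothetical sketch (not part of the vendored file; the credentials are placeholders). PreSave normalizes the fields and bcrypt-hashes the password, so later checks go through ComparePassword:

```go
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	u := &model.User{
		Username: "jane.doe",
		Email:    "jane@example.com",
		Password: "a-placeholder-password",
	}
	u.PreSave() // assigns Id and timestamps, sets default notify props, hashes Password

	if err := u.IsValid(); err != nil {
		fmt.Println("invalid user:", err.Error())
		return
	}

	// The stored Password is now a bcrypt hash, so compare instead of testing equality.
	fmt.Println(model.ComparePassword(u.Password, "a-placeholder-password")) // true
}
```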
diff --git a/vendor/github.com/mattermost/mattermost-server/model/user_access_token.go b/vendor/github.com/mattermost/mattermost-server/model/user_access_token.go
new file mode 100644
index 00000000..bffd9fcb
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/user_access_token.go
@@ -0,0 +1,65 @@
+// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+)
+
+type UserAccessToken struct {
+ Id string `json:"id"`
+ Token string `json:"token,omitempty"`
+ UserId string `json:"user_id"`
+ Description string `json:"description"`
+ IsActive bool `json:"is_active"`
+}
+
+func (t *UserAccessToken) IsValid() *AppError {
+ if len(t.Id) != 26 {
+ return NewAppError("UserAccessToken.IsValid", "model.user_access_token.is_valid.id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(t.Token) != 26 {
+ return NewAppError("UserAccessToken.IsValid", "model.user_access_token.is_valid.token.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(t.UserId) != 26 {
+ return NewAppError("UserAccessToken.IsValid", "model.user_access_token.is_valid.user_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if len(t.Description) > 255 {
+ return NewAppError("UserAccessToken.IsValid", "model.user_access_token.is_valid.description.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ return nil
+}
+
+func (t *UserAccessToken) PreSave() {
+ t.Id = NewId()
+ t.IsActive = true
+}
+
+func (t *UserAccessToken) ToJson() string {
+ b, _ := json.Marshal(t)
+ return string(b)
+}
+
+func UserAccessTokenFromJson(data io.Reader) *UserAccessToken {
+ var t *UserAccessToken
+ json.NewDecoder(data).Decode(&t)
+ return t
+}
+
+func UserAccessTokenListToJson(t []*UserAccessToken) string {
+ b, _ := json.Marshal(t)
+ return string(b)
+}
+
+func UserAccessTokenListFromJson(data io.Reader) []*UserAccessToken {
+ var t []*UserAccessToken
+ json.NewDecoder(data).Decode(&t)
+ return t
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/user_access_token_search.go b/vendor/github.com/mattermost/mattermost-server/model/user_access_token_search.go
new file mode 100644
index 00000000..1b0146ed
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/user_access_token_search.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type UserAccessTokenSearch struct {
+ Term string `json:"term"`
+}
+
+// ToJson converts a UserAccessTokenSearch to a json string
+func (c *UserAccessTokenSearch) ToJson() string {
+ b, err := json.Marshal(c)
+ if err != nil {
+ return ""
+ }
+
+ return string(b)
+}
+
+// UserAccessTokenSearchFromJson decodes the input and returns a UserAccessTokenSearch
+func UserAccessTokenSearchFromJson(data io.Reader) *UserAccessTokenSearch {
+ decoder := json.NewDecoder(data)
+ var cs UserAccessTokenSearch
+ err := decoder.Decode(&cs)
+ if err == nil {
+ return &cs
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/user_autocomplete.go b/vendor/github.com/mattermost/mattermost-server/model/user_autocomplete.go
new file mode 100644
index 00000000..b5edb45b
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/user_autocomplete.go
@@ -0,0 +1,61 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type UserAutocompleteInChannel struct {
+ InChannel []*User `json:"in_channel"`
+ OutOfChannel []*User `json:"out_of_channel"`
+}
+
+type UserAutocompleteInTeam struct {
+ InTeam []*User `json:"in_team"`
+}
+
+type UserAutocomplete struct {
+ Users []*User `json:"users"`
+ OutOfChannel []*User `json:"out_of_channel,omitempty"`
+}
+
+func (o *UserAutocomplete) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func UserAutocompleteFromJson(data io.Reader) *UserAutocomplete {
+ decoder := json.NewDecoder(data)
+ autocomplete := new(UserAutocomplete)
+ err := decoder.Decode(&autocomplete)
+ if err == nil {
+ return autocomplete
+ } else {
+ return nil
+ }
+}
+
+func (o *UserAutocompleteInChannel) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func UserAutocompleteInChannelFromJson(data io.Reader) *UserAutocompleteInChannel {
+ var o *UserAutocompleteInChannel
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func (o *UserAutocompleteInTeam) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func UserAutocompleteInTeamFromJson(data io.Reader) *UserAutocompleteInTeam {
+ var o *UserAutocompleteInTeam
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/user_search.go b/vendor/github.com/mattermost/mattermost-server/model/user_search.go
new file mode 100644
index 00000000..94596bdc
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/user_search.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type UserSearch struct {
+ Term string `json:"term"`
+ TeamId string `json:"team_id"`
+ NotInTeamId string `json:"not_in_team_id"`
+ InChannelId string `json:"in_channel_id"`
+ NotInChannelId string `json:"not_in_channel_id"`
+ AllowInactive bool `json:"allow_inactive"`
+ WithoutTeam bool `json:"without_team"`
+}
+
+// ToJson converts a UserSearch to a json string
+func (u *UserSearch) ToJson() string {
+ b, _ := json.Marshal(u)
+ return string(b)
+}
+
+// UserSearchFromJson will decode the input and return a UserSearch
+func UserSearchFromJson(data io.Reader) *UserSearch {
+ var us *UserSearch
+ json.NewDecoder(data).Decode(&us)
+ return us
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/utils.go b/vendor/github.com/mattermost/mattermost-server/model/utils.go
new file mode 100644
index 00000000..331a1aaa
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/utils.go
@@ -0,0 +1,486 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/base32"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+
+ goi18n "github.com/nicksnyder/go-i18n/i18n"
+ "github.com/pborman/uuid"
+)
+
+const (
+ LOWERCASE_LETTERS = "abcdefghijklmnopqrstuvwxyz"
+ UPPERCASE_LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ NUMBERS = "0123456789"
+ SYMBOLS = " !\"\\#$%&'()*+,-./:;<=>?@[]^_`|~"
+)
+
+type StringInterface map[string]interface{}
+type StringMap map[string]string
+type StringArray []string
+
+var translateFunc goi18n.TranslateFunc = nil
+
+func AppErrorInit(t goi18n.TranslateFunc) {
+ translateFunc = t
+}
+
+type AppError struct {
+ Id string `json:"id"`
+ Message string `json:"message"` // Message to be displayed to the end user without debugging information
+ DetailedError string `json:"detailed_error"` // Internal error string to help the developer
+ RequestId string `json:"request_id,omitempty"` // The RequestId that's also set in the header
+ StatusCode int `json:"status_code,omitempty"` // The http status code
+ Where string `json:"-"` // The function where it happened in the form of Struct.Func
+ IsOAuth bool `json:"is_oauth,omitempty"` // Whether the error is OAuth specific
+ params map[string]interface{}
+}
+
+func (er *AppError) Error() string {
+ return er.Where + ": " + er.Message + ", " + er.DetailedError
+}
+
+func (er *AppError) Translate(T goi18n.TranslateFunc) {
+ if T == nil {
+ er.Message = er.Id
+ return
+ }
+
+ if er.params == nil {
+ er.Message = T(er.Id)
+ } else {
+ er.Message = T(er.Id, er.params)
+ }
+}
+
+func (er *AppError) SystemMessage(T goi18n.TranslateFunc) string {
+ if er.params == nil {
+ return T(er.Id)
+ } else {
+ return T(er.Id, er.params)
+ }
+}
+
+func (er *AppError) ToJson() string {
+ b, _ := json.Marshal(er)
+ return string(b)
+}
+
+// AppErrorFromJson will decode the input and return an AppError
+func AppErrorFromJson(data io.Reader) *AppError {
+ str := ""
+ bytes, rerr := ioutil.ReadAll(data)
+ if rerr != nil {
+ str = rerr.Error()
+ } else {
+ str = string(bytes)
+ }
+
+ decoder := json.NewDecoder(strings.NewReader(str))
+ var er AppError
+ err := decoder.Decode(&er)
+ if err == nil {
+ return &er
+ } else {
+ return NewAppError("AppErrorFromJson", "model.utils.decode_json.app_error", nil, "body: "+str, http.StatusInternalServerError)
+ }
+}
+
+func NewAppError(where string, id string, params map[string]interface{}, details string, status int) *AppError {
+ ap := &AppError{}
+ ap.Id = id
+ ap.params = params
+ ap.Message = id
+ ap.Where = where
+ ap.DetailedError = details
+ ap.StatusCode = status
+ ap.IsOAuth = false
+ ap.Translate(translateFunc)
+ return ap
+}
+
+var encoding = base32.NewEncoding("ybndrfg8ejkmcpqxot1uwisza345h769")
+
+// NewId returns a globally unique identifier: a 26-character [a-z0-9]
+// string. It is a version 4 UUID that is z-base-32 encoded with the
+// padding stripped off.
+func NewId() string {
+ var b bytes.Buffer
+ encoder := base32.NewEncoder(encoding, &b)
+ encoder.Write(uuid.NewRandom())
+ encoder.Close()
+ b.Truncate(26) // removes the '==' padding
+ return b.String()
+}
+
+func NewRandomString(length int) string {
+ var b bytes.Buffer
+ str := make([]byte, length+8)
+ rand.Read(str)
+ encoder := base32.NewEncoder(encoding, &b)
+ encoder.Write(str)
+ encoder.Close()
+ b.Truncate(length) // removes the '==' padding
+ return b.String()
+}
+
+// GetMillis is a convenience method to get milliseconds since epoch.
+func GetMillis() int64 {
+ return time.Now().UnixNano() / int64(time.Millisecond)
+}
+
+// MapToJson converts a map to a json string
+func MapToJson(objmap map[string]string) string {
+ b, _ := json.Marshal(objmap)
+ return string(b)
+}
+
+// MapBoolToJson converts a map of booleans to a json string
+func MapBoolToJson(objmap map[string]bool) string {
+ b, _ := json.Marshal(objmap)
+ return string(b)
+}
+
+// MapFromJson will decode the key/value pair map
+func MapFromJson(data io.Reader) map[string]string {
+ decoder := json.NewDecoder(data)
+
+ var objmap map[string]string
+ if err := decoder.Decode(&objmap); err != nil {
+ return make(map[string]string)
+ } else {
+ return objmap
+ }
+}
+
+// MapBoolFromJson will decode the key/value pair map of booleans
+func MapBoolFromJson(data io.Reader) map[string]bool {
+ decoder := json.NewDecoder(data)
+
+ var objmap map[string]bool
+ if err := decoder.Decode(&objmap); err != nil {
+ return make(map[string]bool)
+ } else {
+ return objmap
+ }
+}
+
+func ArrayToJson(objmap []string) string {
+ b, _ := json.Marshal(objmap)
+ return string(b)
+}
+
+func ArrayFromJson(data io.Reader) []string {
+ decoder := json.NewDecoder(data)
+
+ var objmap []string
+ if err := decoder.Decode(&objmap); err != nil {
+ return make([]string, 0)
+ } else {
+ return objmap
+ }
+}
+
+func ArrayFromInterface(data interface{}) []string {
+ stringArray := []string{}
+
+ dataArray, ok := data.([]interface{})
+ if !ok {
+ return stringArray
+ }
+
+ for _, v := range dataArray {
+ if str, ok := v.(string); ok {
+ stringArray = append(stringArray, str)
+ }
+ }
+
+ return stringArray
+}
+
+func StringInterfaceToJson(objmap map[string]interface{}) string {
+ b, _ := json.Marshal(objmap)
+ return string(b)
+}
+
+func StringInterfaceFromJson(data io.Reader) map[string]interface{} {
+ decoder := json.NewDecoder(data)
+
+ var objmap map[string]interface{}
+ if err := decoder.Decode(&objmap); err != nil {
+ return make(map[string]interface{})
+ } else {
+ return objmap
+ }
+}
+
+func StringToJson(s string) string {
+ b, _ := json.Marshal(s)
+ return string(b)
+}
+
+func StringFromJson(data io.Reader) string {
+ decoder := json.NewDecoder(data)
+
+ var s string
+ if err := decoder.Decode(&s); err != nil {
+ return ""
+ } else {
+ return s
+ }
+}
+
+func GetServerIpAddress() string {
+ if addrs, err := net.InterfaceAddrs(); err != nil {
+ return ""
+ } else {
+ for _, addr := range addrs {
+
+ if ip, ok := addr.(*net.IPNet); ok && !ip.IP.IsLoopback() {
+ if ip.IP.To4() != nil {
+ return ip.IP.String()
+ }
+ }
+ }
+ }
+
+ return ""
+}
+
+func IsLower(s string) bool {
+ return strings.ToLower(s) == s
+}
+
+func IsValidEmail(email string) bool {
+
+ if !IsLower(email) {
+ return false
+ }
+
+ if _, err := mail.ParseAddress(email); err == nil {
+ return true
+ }
+
+ return false
+}
+
+var reservedName = []string{
+ "signup",
+ "login",
+ "admin",
+ "channel",
+ "post",
+ "api",
+ "oauth",
+}
+
+func IsValidChannelIdentifier(s string) bool {
+
+ if !IsValidAlphaNumHyphenUnderscore(s, true) {
+ return false
+ }
+
+ if len(s) < CHANNEL_NAME_MIN_LENGTH {
+ return false
+ }
+
+ return true
+}
+
+func IsValidAlphaNum(s string) bool {
+ validAlphaNum := regexp.MustCompile(`^[a-z0-9]+([a-z\-0-9]+|(__)?)[a-z0-9]+$`)
+
+ return validAlphaNum.MatchString(s)
+}
+
+func IsValidAlphaNumHyphenUnderscore(s string, withFormat bool) bool {
+ if withFormat {
+ validAlphaNumHyphenUnderscore := regexp.MustCompile(`^[a-z0-9]+([a-z\-\_0-9]+|(__)?)[a-z0-9]+$`)
+ return validAlphaNumHyphenUnderscore.MatchString(s)
+ }
+
+ validSimpleAlphaNumHyphenUnderscore := regexp.MustCompile(`^[a-zA-Z0-9\-_]+$`)
+ return validSimpleAlphaNumHyphenUnderscore.MatchString(s)
+}
+
+func Etag(parts ...interface{}) string {
+
+ etag := CurrentVersion
+
+ for _, part := range parts {
+ etag += fmt.Sprintf(".%v", part)
+ }
+
+ return etag
+}
+
+var validHashtag = regexp.MustCompile(`^(#\pL[\pL\d\-_.]*[\pL\d])$`)
+var puncStart = regexp.MustCompile(`^[^\pL\d\s#]+`)
+var hashtagStart = regexp.MustCompile(`^#{2,}`)
+var puncEnd = regexp.MustCompile(`[^\pL\d\s]+$`)
+
+func ParseHashtags(text string) (string, string) {
+ words := strings.Fields(text)
+
+ hashtagString := ""
+ plainString := ""
+ for _, word := range words {
+ // trim off surrounding punctuation
+ word = puncStart.ReplaceAllString(word, "")
+ word = puncEnd.ReplaceAllString(word, "")
+
+ // and remove extra pound #s
+ word = hashtagStart.ReplaceAllString(word, "#")
+
+ if validHashtag.MatchString(word) {
+ hashtagString += " " + word
+ } else {
+ plainString += " " + word
+ }
+ }
+
+ if len(hashtagString) > 1000 {
+ hashtagString = hashtagString[:999]
+ lastSpace := strings.LastIndex(hashtagString, " ")
+ if lastSpace > -1 {
+ hashtagString = hashtagString[:lastSpace]
+ } else {
+ hashtagString = ""
+ }
+ }
+
+ return strings.TrimSpace(hashtagString), strings.TrimSpace(plainString)
+}
+
+func IsFileExtImage(ext string) bool {
+ ext = strings.ToLower(ext)
+ for _, imgExt := range IMAGE_EXTENSIONS {
+ if ext == imgExt {
+ return true
+ }
+ }
+ return false
+}
+
+func GetImageMimeType(ext string) string {
+ ext = strings.ToLower(ext)
+ if len(IMAGE_MIME_TYPES[ext]) == 0 {
+ return "image"
+ } else {
+ return IMAGE_MIME_TYPES[ext]
+ }
+}
+
+func ClearMentionTags(post string) string {
+ post = strings.Replace(post, "<mention>", "", -1)
+ post = strings.Replace(post, "</mention>", "", -1)
+ return post
+}
+
+var UrlRegex = regexp.MustCompile(`^((?:[a-z]+:\/\/)?(?:(?:[a-z0-9\-]+\.)+(?:[a-z]{2}|aero|arpa|biz|com|coop|edu|gov|info|int|jobs|mil|museum|name|nato|net|org|pro|travel|local|internal))(:[0-9]{1,5})?(?:\/[a-z0-9_\-\.~]+)*(\/([a-z0-9_\-\.]*)(?:\?[a-z0-9+_~\-\.%=&amp;]*)?)?(?:#[a-zA-Z0-9!$&'()*+.=-_~:@/?]*)?)(?:\s+|$)$`)
+var PartialUrlRegex = regexp.MustCompile(`/([A-Za-z0-9]{26})/([A-Za-z0-9]{26})/((?:[A-Za-z0-9]{26})?.+(?:\.[A-Za-z0-9]{3,})?)`)
+
+func IsValidHttpUrl(rawUrl string) bool {
+ if strings.Index(rawUrl, "http://") != 0 && strings.Index(rawUrl, "https://") != 0 {
+ return false
+ }
+
+ if _, err := url.ParseRequestURI(rawUrl); err != nil {
+ return false
+ }
+
+ return true
+}
+
+func IsValidHttpsUrl(rawUrl string) bool {
+ if strings.Index(rawUrl, "https://") != 0 {
+ return false
+ }
+
+ if _, err := url.ParseRequestURI(rawUrl); err != nil {
+ return false
+ }
+
+ return true
+}
+
+func IsValidTurnOrStunServer(rawUri string) bool {
+ if strings.Index(rawUri, "turn:") != 0 && strings.Index(rawUri, "stun:") != 0 {
+ return false
+ }
+
+ if _, err := url.ParseRequestURI(rawUri); err != nil {
+ return false
+ }
+
+ return true
+}
+
+func IsSafeLink(link *string) bool {
+ if link != nil {
+ if IsValidHttpUrl(*link) {
+ return true
+ } else if strings.HasPrefix(*link, "/") {
+ return true
+ } else {
+ return false
+ }
+ }
+
+ return true
+}
+
+func IsValidWebsocketUrl(rawUrl string) bool {
+ if strings.Index(rawUrl, "ws://") != 0 && strings.Index(rawUrl, "wss://") != 0 {
+ return false
+ }
+
+ if _, err := url.ParseRequestURI(rawUrl); err != nil {
+ return false
+ }
+
+ return true
+}
+
+func IsValidTrueOrFalseString(value string) bool {
+ return value == "true" || value == "false"
+}
+
+func IsValidNumberString(value string) bool {
+ if _, err := strconv.Atoi(value); err != nil {
+ return false
+ }
+
+ return true
+}
+
+func IsValidId(value string) bool {
+ if len(value) != 26 {
+ return false
+ }
+
+ for _, r := range value {
+ if !unicode.IsLetter(r) && !unicode.IsNumber(r) {
+ return false
+ }
+ }
+
+ return true
+}
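
A few of the general-purpose helpers in this file, exercised in a short hypothetical snippet (not part of the vendored file; the sample text and URL are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	fmt.Println(len(model.NewId()))        // 26
	fmt.Println(model.NewRandomString(16)) // 16 chars from the custom base32 alphabet

	hashtags, plain := model.ParseHashtags("release notes for #v4 and #roadmap")
	fmt.Println(hashtags) // "#v4 #roadmap"
	fmt.Println(plain)    // "release notes for and"

	fmt.Println(model.IsValidHttpUrl("https://chat.example.com")) // true
	fmt.Println(model.IsValidEmail("User@Example.com"))           // false: not lower-case
}
```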
diff --git a/vendor/github.com/mattermost/mattermost-server/model/version.go b/vendor/github.com/mattermost/mattermost-server/model/version.go
new file mode 100644
index 00000000..1bd7baec
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/version.go
@@ -0,0 +1,148 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// This is a list of all the current versions including any patches.
+// It should be maintained in chronological order with the most current
+// release at the front of the list.
+var versions = []string{
+ "4.7.0",
+ "4.6.0",
+ "4.5.0",
+ "4.4.0",
+ "4.3.0",
+ "4.2.0",
+ "4.1.0",
+ "4.0.0",
+ "3.10.0",
+ "3.9.0",
+ "3.8.0",
+ "3.7.0",
+ "3.6.0",
+ "3.5.0",
+ "3.4.0",
+ "3.3.0",
+ "3.2.0",
+ "3.1.0",
+ "3.0.0",
+ "2.2.0",
+ "2.1.0",
+ "2.0.0",
+ "1.4.0",
+ "1.3.0",
+ "1.2.1",
+ "1.2.0",
+ "1.1.0",
+ "1.0.0",
+ "0.7.1",
+ "0.7.0",
+ "0.6.0",
+ "0.5.0",
+}
+
+var CurrentVersion string = versions[0]
+var BuildNumber string
+var BuildDate string
+var BuildHash string
+var BuildHashEnterprise string
+var BuildEnterpriseReady string
+var versionsWithoutHotFixes []string
+
+func init() {
+ versionsWithoutHotFixes = make([]string, 0, len(versions))
+ seen := make(map[string]string)
+
+ for _, version := range versions {
+ maj, min, _ := SplitVersion(version)
+ verStr := fmt.Sprintf("%v.%v.0", maj, min)
+
+ if seen[verStr] == "" {
+ versionsWithoutHotFixes = append(versionsWithoutHotFixes, verStr)
+ seen[verStr] = verStr
+ }
+ }
+}
+
+func SplitVersion(version string) (int64, int64, int64) {
+ parts := strings.Split(version, ".")
+
+ major := int64(0)
+ minor := int64(0)
+ patch := int64(0)
+
+ if len(parts) > 0 {
+ major, _ = strconv.ParseInt(parts[0], 10, 64)
+ }
+
+ if len(parts) > 1 {
+ minor, _ = strconv.ParseInt(parts[1], 10, 64)
+ }
+
+ if len(parts) > 2 {
+ patch, _ = strconv.ParseInt(parts[2], 10, 64)
+ }
+
+ return major, minor, patch
+}
+
+func GetPreviousVersion(version string) string {
+ verMajor, verMinor, _ := SplitVersion(version)
+ verStr := fmt.Sprintf("%v.%v.0", verMajor, verMinor)
+
+ for index, v := range versionsWithoutHotFixes {
+ if v == verStr && len(versionsWithoutHotFixes) > index+1 {
+ return versionsWithoutHotFixes[index+1]
+ }
+ }
+
+ return ""
+}
+
+func IsOfficalBuild() bool {
+ return BuildNumber != "_BUILD_NUMBER_"
+}
+
+func IsCurrentVersion(versionToCheck string) bool {
+ currentMajor, currentMinor, _ := SplitVersion(CurrentVersion)
+ toCheckMajor, toCheckMinor, _ := SplitVersion(versionToCheck)
+
+ if toCheckMajor == currentMajor && toCheckMinor == currentMinor {
+ return true
+ } else {
+ return false
+ }
+}
+
+func IsPreviousVersionsSupported(versionToCheck string) bool {
+ toCheckMajor, toCheckMinor, _ := SplitVersion(versionToCheck)
+ versionToCheckStr := fmt.Sprintf("%v.%v.0", toCheckMajor, toCheckMinor)
+
+ // Current Supported
+ if versionsWithoutHotFixes[0] == versionToCheckStr {
+ return true
+ }
+
+ // Current - 1 Supported
+ if versionsWithoutHotFixes[1] == versionToCheckStr {
+ return true
+ }
+
+ // Current - 2 Supported
+ if versionsWithoutHotFixes[2] == versionToCheckStr {
+ return true
+ }
+
+ // Current - 3 Supported
+ if versionsWithoutHotFixes[3] == versionToCheckStr {
+ return true
+ }
+
+ return false
+}
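
An illustration of the version helpers above (hypothetical caller code, not part of the vendored file; the expected outputs assume CurrentVersion is "4.7.0" as vendored here):

```go
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	major, minor, patch := model.SplitVersion("4.6.2")
	fmt.Println(major, minor, patch) // 4 6 2

	fmt.Println(model.IsCurrentVersion("4.7.1"))            // true: same major.minor as CurrentVersion
	fmt.Println(model.IsPreviousVersionsSupported("4.4.0")) // true: within the last four minor releases
	fmt.Println(model.GetPreviousVersion("4.7.0"))          // "4.6.0"
}
```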
diff --git a/vendor/github.com/mattermost/mattermost-server/model/webrtc.go b/vendor/github.com/mattermost/mattermost-server/model/webrtc.go
new file mode 100644
index 00000000..59797a5b
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/webrtc.go
@@ -0,0 +1,39 @@
+// Copyright (c) 2017 Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type WebrtcInfoResponse struct {
+ Token string `json:"token"`
+ GatewayUrl string `json:"gateway_url"`
+ StunUri string `json:"stun_uri,omitempty"`
+ TurnUri string `json:"turn_uri,omitempty"`
+ TurnPassword string `json:"turn_password,omitempty"`
+ TurnUsername string `json:"turn_username,omitempty"`
+}
+
+type GatewayResponse struct {
+ Status string `json:"janus"`
+}
+
+func GatewayResponseFromJson(data io.Reader) *GatewayResponse {
+ var o *GatewayResponse
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+func (o *WebrtcInfoResponse) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func WebrtcInfoResponseFromJson(data io.Reader) *WebrtcInfoResponse {
+ var o *WebrtcInfoResponse
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/model/websocket_client.go b/vendor/github.com/mattermost/mattermost-server/model/websocket_client.go
new file mode 100644
index 00000000..e5c44dde
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/websocket_client.go
@@ -0,0 +1,167 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "net/http"
+
+ "github.com/gorilla/websocket"
+)
+
+const (
+ SOCKET_MAX_MESSAGE_SIZE_KB = 8 * 1024 // 8KB
+)
+
+type WebSocketClient struct {
+ Url string // The location of the server like "ws://localhost:8065"
+ ApiUrl string // The api location of the server like "ws://localhost:8065/api/v3"
+ ConnectUrl string // The websocket URL to connect to like "ws://localhost:8065/api/v3/path/to/websocket"
+ Conn *websocket.Conn // The WebSocket connection
+ AuthToken string // The token used to open the WebSocket
+ Sequence int64 // The ever-incrementing sequence attached to each WebSocket action
+ EventChannel chan *WebSocketEvent
+ ResponseChannel chan *WebSocketResponse
+ ListenError *AppError
+}
+
+// NewWebSocketClient constructs a new WebSocket client with convenience
+// methods for talking to the server.
+func NewWebSocketClient(url, authToken string) (*WebSocketClient, *AppError) {
+ conn, _, err := websocket.DefaultDialer.Dial(url+API_URL_SUFFIX_V3+"/users/websocket", nil)
+ if err != nil {
+ return nil, NewAppError("NewWebSocketClient", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError)
+ }
+
+ client := &WebSocketClient{
+ url,
+ url + API_URL_SUFFIX_V3,
+ url + API_URL_SUFFIX_V3 + "/users/websocket",
+ conn,
+ authToken,
+ 1,
+ make(chan *WebSocketEvent, 100),
+ make(chan *WebSocketResponse, 100),
+ nil,
+ }
+
+ client.SendMessage(WEBSOCKET_AUTHENTICATION_CHALLENGE, map[string]interface{}{"token": authToken})
+
+ return client, nil
+}
+
+// NewWebSocketClient4 constructs a new WebSocket client with convenience
+// methods for talking to the server. Uses the v4 endpoint.
+func NewWebSocketClient4(url, authToken string) (*WebSocketClient, *AppError) {
+ conn, _, err := websocket.DefaultDialer.Dial(url+API_URL_SUFFIX+"/websocket", nil)
+ if err != nil {
+ return nil, NewAppError("NewWebSocketClient4", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError)
+ }
+
+ client := &WebSocketClient{
+ url,
+ url + API_URL_SUFFIX,
+ url + API_URL_SUFFIX + "/websocket",
+ conn,
+ authToken,
+ 1,
+ make(chan *WebSocketEvent, 100),
+ make(chan *WebSocketResponse, 100),
+ nil,
+ }
+
+ client.SendMessage(WEBSOCKET_AUTHENTICATION_CHALLENGE, map[string]interface{}{"token": authToken})
+
+ return client, nil
+}
+
+func (wsc *WebSocketClient) Connect() *AppError {
+ var err error
+ wsc.Conn, _, err = websocket.DefaultDialer.Dial(wsc.ConnectUrl, nil)
+ if err != nil {
+ return NewAppError("Connect", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError)
+ }
+
+ wsc.EventChannel = make(chan *WebSocketEvent, 100)
+ wsc.ResponseChannel = make(chan *WebSocketResponse, 100)
+
+ wsc.SendMessage(WEBSOCKET_AUTHENTICATION_CHALLENGE, map[string]interface{}{"token": wsc.AuthToken})
+
+ return nil
+}
+
+func (wsc *WebSocketClient) Close() {
+ wsc.Conn.Close()
+}
+
+func (wsc *WebSocketClient) Listen() {
+ go func() {
+ defer func() {
+ wsc.Conn.Close()
+ close(wsc.EventChannel)
+ close(wsc.ResponseChannel)
+ }()
+
+ for {
+ var rawMsg json.RawMessage
+ var err error
+ if _, rawMsg, err = wsc.Conn.ReadMessage(); err != nil {
+ if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseNoStatusReceived) {
+ wsc.ListenError = NewAppError("NewWebSocketClient", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError)
+ }
+
+ return
+ }
+
+ var event WebSocketEvent
+ if err := json.Unmarshal(rawMsg, &event); err == nil && event.IsValid() {
+ wsc.EventChannel <- &event
+ continue
+ }
+
+ var response WebSocketResponse
+ if err := json.Unmarshal(rawMsg, &response); err == nil && response.IsValid() {
+ wsc.ResponseChannel <- &response
+ continue
+ }
+
+ }
+ }()
+}
+
+func (wsc *WebSocketClient) SendMessage(action string, data map[string]interface{}) {
+ req := &WebSocketRequest{}
+ req.Seq = wsc.Sequence
+ req.Action = action
+ req.Data = data
+
+ wsc.Sequence++
+
+ wsc.Conn.WriteJSON(req)
+}
+
+// UserTyping will push a user_typing event out to all connected users
+// who are in the specified channel
+func (wsc *WebSocketClient) UserTyping(channelId, parentId string) {
+ data := map[string]interface{}{
+ "channel_id": channelId,
+ "parent_id": parentId,
+ }
+
+ wsc.SendMessage("user_typing", data)
+}
+
+// GetStatuses will return a map of string statuses using user id as the key
+func (wsc *WebSocketClient) GetStatuses() {
+ wsc.SendMessage("get_statuses", nil)
+}
+
+// GetStatusesByIds will fetch certain user statuses based on ids and return
+// a map of string statuses using user id as the key
+func (wsc *WebSocketClient) GetStatusesByIds(userIds []string) {
+ data := map[string]interface{}{
+ "user_ids": userIds,
+ }
+ wsc.SendMessage("get_statuses_by_ids", data)
+}
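
A hypothetical consumer of the WebSocket client above (not part of the vendored file; the URL and auth token are placeholders, and a reachable Mattermost server is assumed) connects, starts the listener, and drains the event channel:

```go
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	client, appErr := model.NewWebSocketClient4("ws://localhost:8065", "auth-token-placeholder")
	if appErr != nil {
		fmt.Println("connect failed:", appErr.Error())
		return
	}
	defer client.Close()

	client.Listen()      // starts the read loop in a goroutine
	client.GetStatuses() // ask the server for all user statuses

	for event := range client.EventChannel {
		fmt.Println("event:", event.EventType(), event.Data)
	}
}
```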
diff --git a/vendor/github.com/mattermost/mattermost-server/model/websocket_message.go b/vendor/github.com/mattermost/mattermost-server/model/websocket_message.go
new file mode 100644
index 00000000..0256e400
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/websocket_message.go
@@ -0,0 +1,132 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+const (
+ WEBSOCKET_EVENT_TYPING = "typing"
+ WEBSOCKET_EVENT_POSTED = "posted"
+ WEBSOCKET_EVENT_POST_EDITED = "post_edited"
+ WEBSOCKET_EVENT_POST_DELETED = "post_deleted"
+ WEBSOCKET_EVENT_CHANNEL_DELETED = "channel_deleted"
+ WEBSOCKET_EVENT_CHANNEL_CREATED = "channel_created"
+ WEBSOCKET_EVENT_CHANNEL_UPDATED = "channel_updated"
+ WEBSOCKET_EVENT_DIRECT_ADDED = "direct_added"
+ WEBSOCKET_EVENT_GROUP_ADDED = "group_added"
+ WEBSOCKET_EVENT_NEW_USER = "new_user"
+ WEBSOCKET_EVENT_ADDED_TO_TEAM = "added_to_team"
+ WEBSOCKET_EVENT_LEAVE_TEAM = "leave_team"
+ WEBSOCKET_EVENT_UPDATE_TEAM = "update_team"
+ WEBSOCKET_EVENT_DELETE_TEAM = "delete_team"
+ WEBSOCKET_EVENT_USER_ADDED = "user_added"
+ WEBSOCKET_EVENT_USER_UPDATED = "user_updated"
+ WEBSOCKET_EVENT_USER_ROLE_UPDATED = "user_role_updated"
+ WEBSOCKET_EVENT_MEMBERROLE_UPDATED = "memberrole_updated"
+ WEBSOCKET_EVENT_USER_REMOVED = "user_removed"
+ WEBSOCKET_EVENT_PREFERENCE_CHANGED = "preference_changed"
+ WEBSOCKET_EVENT_PREFERENCES_CHANGED = "preferences_changed"
+ WEBSOCKET_EVENT_PREFERENCES_DELETED = "preferences_deleted"
+ WEBSOCKET_EVENT_EPHEMERAL_MESSAGE = "ephemeral_message"
+ WEBSOCKET_EVENT_STATUS_CHANGE = "status_change"
+ WEBSOCKET_EVENT_HELLO = "hello"
+ WEBSOCKET_EVENT_WEBRTC = "webrtc"
+ WEBSOCKET_AUTHENTICATION_CHALLENGE = "authentication_challenge"
+ WEBSOCKET_EVENT_REACTION_ADDED = "reaction_added"
+ WEBSOCKET_EVENT_REACTION_REMOVED = "reaction_removed"
+ WEBSOCKET_EVENT_RESPONSE = "response"
+ WEBSOCKET_EVENT_EMOJI_ADDED = "emoji_added"
+ WEBSOCKET_EVENT_CHANNEL_VIEWED = "channel_viewed"
+ WEBSOCKET_EVENT_PLUGIN_ACTIVATED = "plugin_activated" // EXPERIMENTAL - SUBJECT TO CHANGE
+ WEBSOCKET_EVENT_PLUGIN_DEACTIVATED = "plugin_deactivated" // EXPERIMENTAL - SUBJECT TO CHANGE
+)
+
+type WebSocketMessage interface {
+ ToJson() string
+ IsValid() bool
+ EventType() string
+}
+
+type WebsocketBroadcast struct {
+ OmitUsers map[string]bool `json:"omit_users"` // broadcast is omitted for users listed here
+ UserId string `json:"user_id"` // broadcast only occurs for this user
+ ChannelId string `json:"channel_id"` // broadcast only occurs for users in this channel
+ TeamId string `json:"team_id"` // broadcast only occurs for users in this team
+}
+
+type WebSocketEvent struct {
+ Event string `json:"event"`
+ Data map[string]interface{} `json:"data"`
+ Broadcast *WebsocketBroadcast `json:"broadcast"`
+ Sequence int64 `json:"seq"`
+}
+
+func (m *WebSocketEvent) Add(key string, value interface{}) {
+ m.Data[key] = value
+}
+
+func NewWebSocketEvent(event, teamId, channelId, userId string, omitUsers map[string]bool) *WebSocketEvent {
+ return &WebSocketEvent{Event: event, Data: make(map[string]interface{}),
+ Broadcast: &WebsocketBroadcast{TeamId: teamId, ChannelId: channelId, UserId: userId, OmitUsers: omitUsers}}
+}
+
+func (o *WebSocketEvent) IsValid() bool {
+ return o.Event != ""
+}
+
+func (o *WebSocketEvent) EventType() string {
+ return o.Event
+}
+
+func (o *WebSocketEvent) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func WebSocketEventFromJson(data io.Reader) *WebSocketEvent {
+ var o *WebSocketEvent
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
+
+type WebSocketResponse struct {
+ Status string `json:"status"`
+ SeqReply int64 `json:"seq_reply,omitempty"`
+ Data map[string]interface{} `json:"data,omitempty"`
+ Error *AppError `json:"error,omitempty"`
+}
+
+func (m *WebSocketResponse) Add(key string, value interface{}) {
+ m.Data[key] = value
+}
+
+func NewWebSocketResponse(status string, seqReply int64, data map[string]interface{}) *WebSocketResponse {
+ return &WebSocketResponse{Status: status, SeqReply: seqReply, Data: data}
+}
+
+func NewWebSocketError(seqReply int64, err *AppError) *WebSocketResponse {
+ return &WebSocketResponse{Status: STATUS_FAIL, SeqReply: seqReply, Error: err}
+}
+
+func (o *WebSocketResponse) IsValid() bool {
+ return o.Status != ""
+}
+
+func (o *WebSocketResponse) EventType() string {
+ return WEBSOCKET_EVENT_RESPONSE
+}
+
+func (o *WebSocketResponse) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func WebSocketResponseFromJson(data io.Reader) *WebSocketResponse {
+ var o *WebSocketResponse
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
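
For orientation, a minimal sketch of how the helpers added above fit together: it builds a "posted" event, serializes it, and parses it back as a receiving client would. It assumes the package is imported as github.com/mattermost/mattermost-server/model; the channel ID and payload are placeholders, and this snippet is not part of the vendored code.

package main

import (
	"fmt"
	"strings"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	// Target everyone in one channel; team, user, and omit-user fields are left empty.
	event := model.NewWebSocketEvent(model.WEBSOCKET_EVENT_POSTED, "", "channel-id", "", nil)
	event.Add("post", `{"message":"hello"}`) // payload keys are free-form map entries

	// Serialize for the wire, then decode it back with the reader-based helper.
	raw := event.ToJson()
	parsed := model.WebSocketEventFromJson(strings.NewReader(raw))
	fmt.Println(parsed.EventType(), parsed.IsValid(), parsed.Data["post"])
}

The Broadcast fields populated by NewWebSocketEvent control delivery: per the struct comments above, setting only ChannelId targets users in that channel, while OmitUsers, UserId, and TeamId narrow or redirect the broadcast.
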
diff --git a/vendor/github.com/mattermost/mattermost-server/model/websocket_request.go b/vendor/github.com/mattermost/mattermost-server/model/websocket_request.go
new file mode 100644
index 00000000..4da626e2
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/model/websocket_request.go
@@ -0,0 +1,34 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+
+ goi18n "github.com/nicksnyder/go-i18n/i18n"
+)
+
+type WebSocketRequest struct {
+ // Client-provided fields
+ Seq int64 `json:"seq"`
+ Action string `json:"action"`
+ Data map[string]interface{} `json:"data"`
+
+ // Server-provided fields
+ Session Session `json:"-"`
+ T goi18n.TranslateFunc `json:"-"`
+ Locale string `json:"-"`
+}
+
+func (o *WebSocketRequest) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func WebSocketRequestFromJson(data io.Reader) *WebSocketRequest {
+ var o *WebSocketRequest
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
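
To show the request/response pairing, here is a minimal sketch that decodes an incoming client frame with the helper above and answers it with a WebSocketResponse keyed to the same sequence number. The action name and data key are illustrative only, and STATUS_OK is assumed to be defined in the model package alongside the STATUS_FAIL constant used by NewWebSocketError.

package main

import (
	"fmt"
	"strings"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	// An incoming client frame; the action name and data key are placeholders.
	frame := `{"seq": 1, "action": "user_typing", "data": {"channel_id": "channel-id"}}`
	req := model.WebSocketRequestFromJson(strings.NewReader(frame))

	// Reply with the client's seq echoed in seq_reply; STATUS_OK is an assumed constant.
	resp := model.NewWebSocketResponse(model.STATUS_OK, req.Seq, map[string]interface{}{"ack": true})
	fmt.Println(req.Action, resp.ToJson())
}
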
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/LICENSE.txt
new file mode 100644
index 00000000..ead98cf0
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/LICENSE.txt
@@ -0,0 +1,897 @@
+Mattermost Licensing
+
+SOFTWARE LICENSING
+
+You are licensed to use compiled versions of the Mattermost platform produced by Mattermost, Inc. under an MIT LICENSE
+
+- See MIT-COMPILED-LICENSE.md included in compiled versions for details
+
+You may be licensed to use source code to create compiled versions not produced by Mattermost, Inc. in one of two ways:
+
+1. Under the Free Software Foundation’s GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or
+2. Under a commercial license available from Mattermost, Inc. by contacting commercial@mattermost.com
+
+You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/, model/,
+webapp/client, webapp/fonts, webapp/i18n, webapp/images and all subdirectories thereof) under the Apache License v2.0.
+
+We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not
+link to the Mattermost Platform directly, but exclusively uses the Mattermost Admin Tools and Configuration Files, and
+(b) you have not modified, added to or adapted the source code of Mattermost in a way that results in the creation of
+a “modified version” or “work based on” Mattermost as these terms are defined in the AGPL v3.0 license.
+
+MATTERMOST TRADEMARK GUIDELINES
+
+Your use of the mark Mattermost is subject to Mattermost, Inc's prior written approval and our organization’s Trademark
+Standards of Use at http://www.mattermost.org/trademark-standards-of-use/. For trademark approval or any questions
+you have about using these trademarks, please email trademark@mattermost.com
+
+------------------------------------------------------------------------------------------------------------------------------
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+------------------------------------------------------------------------------
+
+The software is released under the terms of the GNU Affero General Public
+License, version 3.
+
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/config.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/config.go
new file mode 100644
index 00000000..577c3eb2
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/config.go
@@ -0,0 +1,288 @@
+// Copyright (C) 2010, Kyle Lemons <kyle@kylelemons.net>. All rights reserved.
+
+package log4go
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+)
+
+type xmlProperty struct {
+ Name string `xml:"name,attr"`
+ Value string `xml:",chardata"`
+}
+
+type xmlFilter struct {
+ Enabled string `xml:"enabled,attr"`
+ Tag string `xml:"tag"`
+ Level string `xml:"level"`
+ Type string `xml:"type"`
+ Property []xmlProperty `xml:"property"`
+}
+
+type xmlLoggerConfig struct {
+ Filter []xmlFilter `xml:"filter"`
+}
+
+// Load XML configuration; see examples/example.xml for documentation
+func (log Logger) LoadConfiguration(filename string) {
+ log.Close()
+
+ // Open the configuration file
+ fd, err := os.Open(filename)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not open %q for reading: %s\n", filename, err)
+ os.Exit(1)
+ }
+
+ contents, err := ioutil.ReadAll(fd)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not read %q: %s\n", filename, err)
+ os.Exit(1)
+ }
+
+ xc := new(xmlLoggerConfig)
+ if err := xml.Unmarshal(contents, xc); err != nil {
+ fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not parse XML configuration in %q: %s\n", filename, err)
+ os.Exit(1)
+ }
+
+ for _, xmlfilt := range xc.Filter {
+ var filt LogWriter
+ var lvl Level
+ bad, good, enabled := false, true, false
+
+ // Check required children
+ if len(xmlfilt.Enabled) == 0 {
+ fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required attribute %s for filter missing in %s\n", "enabled", filename)
+ bad = true
+ } else {
+ enabled = xmlfilt.Enabled != "false"
+ }
+ if len(xmlfilt.Tag) == 0 {
+ fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter missing in %s\n", "tag", filename)
+ bad = true
+ }
+ if len(xmlfilt.Type) == 0 {
+ fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter missing in %s\n", "type", filename)
+ bad = true
+ }
+ if len(xmlfilt.Level) == 0 {
+ fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter missing in %s\n", "level", filename)
+ bad = true
+ }
+
+ switch xmlfilt.Level {
+ case "FINEST":
+ lvl = FINEST
+ case "FINE":
+ lvl = FINE
+ case "DEBUG":
+ lvl = DEBUG
+ case "TRACE":
+ lvl = TRACE
+ case "INFO":
+ lvl = INFO
+ case "WARNING":
+ lvl = WARNING
+ case "ERROR":
+ lvl = ERROR
+ case "CRITICAL":
+ lvl = CRITICAL
+ default:
+ fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter has unknown value in %s: %s\n", "level", filename, xmlfilt.Level)
+ bad = true
+ }
+
+ // Just so all of the required attributes are errored at the same time if missing
+ if bad {
+ os.Exit(1)
+ }
+
+ switch xmlfilt.Type {
+ case "console":
+ filt, good = xmlToConsoleLogWriter(filename, xmlfilt.Property, enabled)
+ case "file":
+ filt, good = xmlToFileLogWriter(filename, xmlfilt.Property, enabled)
+ case "xml":
+ filt, good = xmlToXMLLogWriter(filename, xmlfilt.Property, enabled)
+ case "socket":
+ filt, good = xmlToSocketLogWriter(filename, xmlfilt.Property, enabled)
+ default:
+ fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not load XML configuration in %s: unknown filter type \"%s\"\n", filename, xmlfilt.Type)
+ os.Exit(1)
+ }
+
+ // Just so all of the required params are errored at the same time if wrong
+ if !good {
+ os.Exit(1)
+ }
+
+ // If we're disabled (syntax and correctness checks only), don't add to logger
+ if !enabled {
+ continue
+ }
+
+ log[xmlfilt.Tag] = &Filter{lvl, filt}
+ }
+}
+
+func xmlToConsoleLogWriter(filename string, props []xmlProperty, enabled bool) (*ConsoleLogWriter, bool) {
+ // Parse properties
+ for _, prop := range props {
+ switch prop.Name {
+ default:
+ fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for console filter in %s\n", prop.Name, filename)
+ }
+ }
+
+ // If it's disabled, we're just checking syntax
+ if !enabled {
+ return nil, true
+ }
+
+ return NewConsoleLogWriter(), true
+}
+
+// Parse a number with K/M/G suffixes based on thousands (1000) or 2^10 (1024)
+func strToNumSuffix(str string, mult int) int {
+ num := 1
+ if len(str) > 1 {
+ switch str[len(str)-1] {
+ case 'G', 'g':
+ num *= mult
+ fallthrough
+ case 'M', 'm':
+ num *= mult
+ fallthrough
+ case 'K', 'k':
+ num *= mult
+ str = str[0 : len(str)-1]
+ }
+ }
+ parsed, _ := strconv.Atoi(str)
+ return parsed * num
+}
+func xmlToFileLogWriter(filename string, props []xmlProperty, enabled bool) (*FileLogWriter, bool) {
+ file := ""
+ format := "[%D %T] [%L] (%S) %M"
+ maxlines := 0
+ maxsize := 0
+ daily := false
+ rotate := false
+
+ // Parse properties
+ for _, prop := range props {
+ switch prop.Name {
+ case "filename":
+ file = strings.Trim(prop.Value, " \r\n")
+ case "format":
+ format = strings.Trim(prop.Value, " \r\n")
+ case "maxlines":
+ maxlines = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1000)
+ case "maxsize":
+ maxsize = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1024)
+ case "daily":
+ daily = strings.Trim(prop.Value, " \r\n") != "false"
+ case "rotate":
+ rotate = strings.Trim(prop.Value, " \r\n") != "false"
+ default:
+ fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for file filter in %s\n", prop.Name, filename)
+ }
+ }
+
+ // Check properties
+ if len(file) == 0 {
+ fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required property \"%s\" for file filter missing in %s\n", "filename", filename)
+ return nil, false
+ }
+
+ // If it's disabled, we're just checking syntax
+ if !enabled {
+ return nil, true
+ }
+
+ flw := NewFileLogWriter(file, rotate)
+ flw.SetFormat(format)
+ flw.SetRotateLines(maxlines)
+ flw.SetRotateSize(maxsize)
+ flw.SetRotateDaily(daily)
+ return flw, true
+}
+
+func xmlToXMLLogWriter(filename string, props []xmlProperty, enabled bool) (*FileLogWriter, bool) {
+ file := ""
+ maxrecords := 0
+ maxsize := 0
+ daily := false
+ rotate := false
+
+ // Parse properties
+ for _, prop := range props {
+ switch prop.Name {
+ case "filename":
+ file = strings.Trim(prop.Value, " \r\n")
+ case "maxrecords":
+ maxrecords = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1000)
+ case "maxsize":
+ maxsize = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1024)
+ case "daily":
+ daily = strings.Trim(prop.Value, " \r\n") != "false"
+ case "rotate":
+ rotate = strings.Trim(prop.Value, " \r\n") != "false"
+ default:
+ fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for xml filter in %s\n", prop.Name, filename)
+ }
+ }
+
+ // Check properties
+ if len(file) == 0 {
+ fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required property \"%s\" for xml filter missing in %s\n", "filename", filename)
+ return nil, false
+ }
+
+ // If it's disabled, we're just checking syntax
+ if !enabled {
+ return nil, true
+ }
+
+ xlw := NewXMLLogWriter(file, rotate)
+ xlw.SetRotateLines(maxrecords)
+ xlw.SetRotateSize(maxsize)
+ xlw.SetRotateDaily(daily)
+ return xlw, true
+}
+
+func xmlToSocketLogWriter(filename string, props []xmlProperty, enabled bool) (SocketLogWriter, bool) {
+ endpoint := ""
+ protocol := "udp"
+
+ // Parse properties
+ for _, prop := range props {
+ switch prop.Name {
+ case "endpoint":
+ endpoint = strings.Trim(prop.Value, " \r\n")
+ case "protocol":
+ protocol = strings.Trim(prop.Value, " \r\n")
+ default:
+ fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for socket filter in %s\n", prop.Name, filename)
+ }
+ }
+
+ // Check properties
+ if len(endpoint) == 0 {
+ fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required property \"%s\" for socket filter missing in %s\n", "endpoint", filename)
+ return nil, false
+ }
+
+ // If it's disabled, we're just checking syntax
+ if !enabled {
+ return nil, true
+ }
+
+ return NewSocketLogWriter(protocol, endpoint), true
+}
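
For illustration, a minimal sketch of what an enabled file filter ends up configuring, mirroring xmlToFileLogWriter above. The import alias and path match the bundled examples; the log file name and property values are assumptions, not taken from a real configuration. Note that a maxlines value of "100K" parses to 100 * 1000 and a maxsize of "10M" to 10 * 1024 * 1024, per strToNumSuffix.

    package main

    import l4g "code.google.com/p/log4go"

    func main() {
        // Hypothetical values standing in for the parsed filter properties.
        flw := l4g.NewFileLogWriter("app.log", true) // filename, rotate
        flw.SetFormat("[%D %T] [%L] (%S) %M")        // format
        flw.SetRotateLines(100 * 1000)               // maxlines "100K"
        flw.SetRotateSize(10 * 1024 * 1024)          // maxsize "10M"
        flw.SetRotateDaily(true)                     // daily

        log := make(l4g.Logger)
        defer log.Close()
        log.AddFilter("file", l4g.FINE, flw)
        log.Info("configured programmatically, as LoadConfiguration would")
    }
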
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/ConsoleLogWriter_Manual.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/ConsoleLogWriter_Manual.go
new file mode 100644
index 00000000..698dd332
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/ConsoleLogWriter_Manual.go
@@ -0,0 +1,14 @@
+package main
+
+import (
+ "time"
+)
+
+import l4g "code.google.com/p/log4go"
+
+func main() {
+ log := l4g.NewLogger()
+ defer log.Close()
+ log.AddFilter("stdout", l4g.DEBUG, l4g.NewConsoleLogWriter())
+ log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02"))
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/FileLogWriter_Manual.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/FileLogWriter_Manual.go
new file mode 100644
index 00000000..efd596aa
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/FileLogWriter_Manual.go
@@ -0,0 +1,57 @@
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "time"
+)
+
+import l4g "code.google.com/p/log4go"
+
+const (
+ filename = "flw.log"
+)
+
+func main() {
+ // Get a new logger instance
+ log := l4g.NewLogger()
+
+ // Create a default logger that is logging messages of FINE or higher
+ log.AddFilter("file", l4g.FINE, l4g.NewFileLogWriter(filename, false))
+ log.Close()
+
+ /* Can also specify manually via the following: (these are the defaults) */
+ flw := l4g.NewFileLogWriter(filename, false)
+ flw.SetFormat("[%D %T] [%L] (%S) %M")
+ flw.SetRotate(false)
+ flw.SetRotateSize(0)
+ flw.SetRotateLines(0)
+ flw.SetRotateDaily(false)
+ log.AddFilter("file", l4g.FINE, flw)
+
+ // Log some experimental messages
+ log.Finest("Everything is created now (notice that I will not be printing to the file)")
+ log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02"))
+ log.Critical("Time to close out!")
+
+ // Close the log
+ log.Close()
+
+ // Print what was logged to the file (yes, I know I'm skipping error checking)
+ fd, _ := os.Open(filename)
+ in := bufio.NewReader(fd)
+ fmt.Print("Messages logged to file were: (line numbers not included)\n")
+ for lineno := 1; ; lineno++ {
+ line, err := in.ReadString('\n')
+ if err == io.EOF {
+ break
+ }
+ fmt.Printf("%3d:\t%s", lineno, line)
+ }
+ fd.Close()
+
+ // Remove the file so it's not lying around
+ os.Remove(filename)
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/SimpleNetLogServer.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/SimpleNetLogServer.go
new file mode 100644
index 00000000..83c80ad1
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/SimpleNetLogServer.go
@@ -0,0 +1,42 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "net"
+ "os"
+)
+
+var (
+ port = flag.String("p", "12124", "Port number to listen on")
+)
+
+func e(err error) {
+ if err != nil {
+ fmt.Printf("Erroring out: %s\n", err)
+ os.Exit(1)
+ }
+}
+
+func main() {
+ flag.Parse()
+
+ // Bind to the port
+ bind, err := net.ResolveUDPAddr("udp", "0.0.0.0:"+*port)
+ e(err)
+
+ // Create listener
+ listener, err := net.ListenUDP("udp", bind)
+ e(err)
+
+ fmt.Printf("Listening to port %s...\n", *port)
+ for {
+ // read into a new buffer
+ buffer := make([]byte, 1024)
+ _, _, err := listener.ReadFrom(buffer)
+ e(err)
+
+ // log to standard output
+ fmt.Println(string(buffer))
+ }
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/SocketLogWriter_Manual.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/SocketLogWriter_Manual.go
new file mode 100644
index 00000000..400b698c
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/SocketLogWriter_Manual.go
@@ -0,0 +1,18 @@
+package main
+
+import (
+ "time"
+)
+
+import l4g "code.google.com/p/log4go"
+
+func main() {
+ log := l4g.NewLogger()
+ log.AddFilter("network", l4g.FINEST, l4g.NewSocketLogWriter("udp", "192.168.1.255:12124"))
+
+ // Run `nc -u -l -p 12124` or similar before you run this to see the following message
+ log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02"))
+
+ // This makes sure the output stream buffer is written
+ log.Close()
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/XMLConfigurationExample.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/XMLConfigurationExample.go
new file mode 100644
index 00000000..164c2add
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/XMLConfigurationExample.go
@@ -0,0 +1,13 @@
+package main
+
+import l4g "code.google.com/p/log4go"
+
+func main() {
+ // Load the configuration (isn't this easy?)
+ l4g.LoadConfiguration("example.xml")
+
+ // And now we're ready!
+ l4g.Finest("This will only go to those of you really cool UDP kids! If you change enabled=true.")
+ l4g.Debug("Oh no! %d + %d = %d!", 2, 2, 2+2)
+ l4g.Info("About that time, eh chaps?")
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/filelog.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/filelog.go
new file mode 100644
index 00000000..9bc4df15
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/filelog.go
@@ -0,0 +1,264 @@
+// Copyright (C) 2010, Kyle Lemons <kyle@kylelemons.net>. All rights reserved.
+
+package log4go
+
+import (
+ "fmt"
+ "os"
+ "time"
+)
+
+// This log writer sends output to a file
+type FileLogWriter struct {
+ rec chan *LogRecord
+ rot chan bool
+
+ // The opened file
+ filename string
+ file *os.File
+
+ // The logging format
+ format string
+
+ // File header/trailer
+ header, trailer string
+
+ // Rotate at linecount
+ maxlines int
+ maxlines_curlines int
+
+ // Rotate at size
+ maxsize int
+ maxsize_cursize int
+
+ // Rotate daily
+ daily bool
+ daily_opendate int
+
+ // Keep old logfiles (.001, .002, etc)
+ rotate bool
+ maxbackup int
+}
+
+// This is the FileLogWriter's output method
+func (w *FileLogWriter) LogWrite(rec *LogRecord) {
+ w.rec <- rec
+}
+
+func (w *FileLogWriter) Close() {
+ close(w.rec)
+}
+
+// NewFileLogWriter creates a new LogWriter which writes to the given file and
+// has rotation enabled if rotate is true.
+//
+// If rotate is true, any time a new log file is opened, the old one is renamed
+// with a .### extension to preserve it. The various Set* methods can be used
+// to configure log rotation based on lines, size, and daily.
+//
+// The standard log-line format is:
+// [%D %T] [%L] (%S) %M
+func NewFileLogWriter(fname string, rotate bool) *FileLogWriter {
+ w := &FileLogWriter{
+ rec: make(chan *LogRecord, LogBufferLength),
+ rot: make(chan bool),
+ filename: fname,
+ format: "[%D %T] [%L] (%S) %M",
+ rotate: rotate,
+ maxbackup: 999,
+ }
+
+ // open the file for the first time
+ if err := w.intRotate(); err != nil {
+ fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err)
+ return nil
+ }
+
+ go func() {
+ defer func() {
+ if w.file != nil {
+ fmt.Fprint(w.file, FormatLogRecord(w.trailer, &LogRecord{Created: time.Now()}))
+ w.file.Sync()
+ w.file.Close()
+ }
+ }()
+
+ for {
+ select {
+ case <-w.rot:
+ if err := w.intRotate(); err != nil {
+ fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err)
+ return
+ }
+ case rec, ok := <-w.rec:
+ if !ok {
+ return
+ }
+ now := time.Now()
+ if (w.maxlines > 0 && w.maxlines_curlines >= w.maxlines) ||
+ (w.maxsize > 0 && w.maxsize_cursize >= w.maxsize) ||
+ (w.daily && now.Day() != w.daily_opendate) {
+ if err := w.intRotate(); err != nil {
+ fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err)
+ return
+ }
+ }
+
+ // Perform the write
+ n, err := fmt.Fprint(w.file, FormatLogRecord(w.format, rec))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err)
+ return
+ }
+
+ // Update the counts
+ w.maxlines_curlines++
+ w.maxsize_cursize += n
+ }
+ }
+ }()
+
+ return w
+}
+
+// Request that the logs rotate
+func (w *FileLogWriter) Rotate() {
+ w.rot <- true
+}
+
+// If this is called in a threaded context, it MUST be synchronized
+func (w *FileLogWriter) intRotate() error {
+ // Close any log file that may be open
+ if w.file != nil {
+ fmt.Fprint(w.file, FormatLogRecord(w.trailer, &LogRecord{Created: time.Now()}))
+ w.file.Close()
+ }
+
+ // If we are keeping log files, move it to the next available number
+ if w.rotate {
+ _, err := os.Lstat(w.filename)
+ if err == nil { // file exists
+ // Find the next available number
+ num := 1
+ fname := ""
+ if w.daily && time.Now().Day() != w.daily_opendate {
+ yesterday := time.Now().AddDate(0, 0, -1).Format("2006-01-02")
+
+ for ; err == nil && num <= 999; num++ {
+ fname = w.filename + fmt.Sprintf(".%s.%03d", yesterday, num)
+ _, err = os.Lstat(fname)
+ }
+ // return error if the last file checked still existed
+ if err == nil {
+ return fmt.Errorf("Rotate: Cannot find free log number to rename %s\n", w.filename)
+ }
+ } else {
+ num = w.maxbackup - 1
+ for ; num >= 1; num-- {
+ fname = w.filename + fmt.Sprintf(".%d", num)
+ nfname := w.filename + fmt.Sprintf(".%d", num+1)
+ _, err = os.Lstat(fname)
+ if err == nil {
+ os.Rename(fname, nfname)
+ }
+ }
+ }
+
+ w.file.Close()
+ // Rename the file to its newfound home
+ err = os.Rename(w.filename, fname)
+ if err != nil {
+ return fmt.Errorf("Rotate: %s\n", err)
+ }
+ }
+ }
+
+ // Open the log file
+ fd, err := os.OpenFile(w.filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0660)
+ if err != nil {
+ return err
+ }
+ w.file = fd
+
+ now := time.Now()
+ fmt.Fprint(w.file, FormatLogRecord(w.header, &LogRecord{Created: now}))
+
+ // Set the daily open date to the current date
+ w.daily_opendate = now.Day()
+
+ // initialize rotation values
+ w.maxlines_curlines = 0
+ w.maxsize_cursize = 0
+
+ return nil
+}
+
+// Set the logging format (chainable). Must be called before the first log
+// message is written.
+func (w *FileLogWriter) SetFormat(format string) *FileLogWriter {
+ w.format = format
+ return w
+}
+
+// Set the logfile header and footer (chainable). Must be called before the first log
+// message is written. These are formatted similar to the FormatLogRecord (e.g.
+// you can use %D and %T in your header/footer for date and time).
+func (w *FileLogWriter) SetHeadFoot(head, foot string) *FileLogWriter {
+ w.header, w.trailer = head, foot
+ if w.maxlines_curlines == 0 {
+ fmt.Fprint(w.file, FormatLogRecord(w.header, &LogRecord{Created: time.Now()}))
+ }
+ return w
+}
+
+// Set rotate at linecount (chainable). Must be called before the first log
+// message is written.
+func (w *FileLogWriter) SetRotateLines(maxlines int) *FileLogWriter {
+ //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotateLines: %v\n", maxlines)
+ w.maxlines = maxlines
+ return w
+}
+
+// Set rotate at size (chainable). Must be called before the first log message
+// is written.
+func (w *FileLogWriter) SetRotateSize(maxsize int) *FileLogWriter {
+ //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotateSize: %v\n", maxsize)
+ w.maxsize = maxsize
+ return w
+}
+
+// Set rotate daily (chainable). Must be called before the first log message is
+// written.
+func (w *FileLogWriter) SetRotateDaily(daily bool) *FileLogWriter {
+ //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotateDaily: %v\n", daily)
+ w.daily = daily
+ return w
+}
+
+// Set max backup files. Must be called before the first log message
+// is written.
+func (w *FileLogWriter) SetRotateMaxBackup(maxbackup int) *FileLogWriter {
+ w.maxbackup = maxbackup
+ return w
+}
+
+// SetRotate changes whether or not the old logs are kept. (chainable) Must be
+// called before the first log message is written. If rotate is false, the
+// files are overwritten; otherwise, they are rotated to another file before the
+// new log is opened.
+func (w *FileLogWriter) SetRotate(rotate bool) *FileLogWriter {
+ //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotate: %v\n", rotate)
+ w.rotate = rotate
+ return w
+}
+
+// NewXMLLogWriter is a utility method for creating a FileLogWriter set up to
+// output XML record log messages instead of line-based ones.
+func NewXMLLogWriter(fname string, rotate bool) *FileLogWriter {
+ return NewFileLogWriter(fname, rotate).SetFormat(
+ ` <record level="%L">
+ <timestamp>%D %T</timestamp>
+ <source>%S</source>
+ <message>%M</message>
+ </record>`).SetHeadFoot("<log created=\"%D %T\">", "</log>")
+}
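
As a complement to the FileLogWriter_Manual example earlier in this diff, here is a minimal sketch exercising the rotation and header/footer knobs documented above (SetHeadFoot, SetRotateMaxBackup, NewXMLLogWriter). File names and limits are illustrative assumptions.

    package main

    import l4g "code.google.com/p/log4go"

    func main() {
        // Keep up to 5 rotated copies (app.log.1 .. app.log.5) and bracket the
        // file with a header/footer formatted like a log line (%D, %T expand).
        flw := l4g.NewFileLogWriter("app.log", true)
        flw.SetHeadFoot("=== opened %D %T ===", "=== closed %D %T ===")
        flw.SetRotateMaxBackup(5)
        flw.SetRotateSize(1024 * 1024)

        // NewXMLLogWriter is the same writer preconfigured to emit XML records.
        xlw := l4g.NewXMLLogWriter("app.xml", false)

        log := make(l4g.Logger)
        defer log.Close()
        log.AddFilter("file", l4g.FINE, flw)
        log.AddFilter("xml", l4g.FINE, xlw)
        log.Info("rotation, head/foot and XML output configured")
    }
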
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/log4go.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/log4go.go
new file mode 100644
index 00000000..822e890c
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/log4go.go
@@ -0,0 +1,484 @@
+// Copyright (C) 2010, Kyle Lemons <kyle@kylelemons.net>. All rights reserved.
+
+// Package log4go provides level-based and highly configurable logging.
+//
+// Enhanced Logging
+//
+// This is inspired by the logging functionality in Java. Essentially, you create a Logger
+// object and create output filters for it. You can send whatever you want to the Logger,
+// and it will filter that based on your settings and send it to the outputs. This way, you
+// can put as much debug code in your program as you want, and when you're done you can filter
+// out the mundane messages so only the important ones show up.
+//
+// Utility functions are provided to make life easier. Here is some example code to get started:
+//
+// log := log4go.NewLogger()
+// log.AddFilter("stdout", log4go.DEBUG, log4go.NewConsoleLogWriter())
+// log.AddFilter("log", log4go.FINE, log4go.NewFileLogWriter("example.log", true))
+// log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02"))
+//
+// The first two lines can be combined with the utility NewDefaultLogger:
+//
+// log := log4go.NewDefaultLogger(log4go.DEBUG)
+// log.AddFilter("log", log4go.FINE, log4go.NewFileLogWriter("example.log", true))
+// log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02"))
+//
+// Usage notes:
+// - The ConsoleLogWriter does not display the source of the message to standard
+// output, but the FileLogWriter does.
+// - The utility functions (Info, Debug, Warn, etc) derive their source from the
+// calling function, and this incurs extra overhead.
+//
+// Changes from 2.0:
+// - The external interface has remained mostly stable, but a lot of the
+// internals have been changed, so if you depended on any of this or created
+// your own LogWriter, then you will probably have to update your code. In
+// particular, Logger is now a map and ConsoleLogWriter is now a channel
+// behind-the-scenes, and the LogWrite method no longer has return values.
+//
+// Future work: (please let me know if you think I should work on any of these particularly)
+// - Log file rotation
+// - Logging configuration files ala log4j
+// - Have the ability to remove filters?
+// - Have GetInfoChannel, GetDebugChannel, etc return a chan string that allows
+// for another method of logging
+// - Add an XML filter type
+package log4go
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+ "time"
+)
+
+// Version information
+const (
+ L4G_VERSION = "log4go-v3.0.1"
+ L4G_MAJOR = 3
+ L4G_MINOR = 0
+ L4G_BUILD = 1
+)
+
+/****** Constants ******/
+
+// These are the integer logging levels used by the logger
+type Level int
+
+const (
+ FINEST Level = iota
+ FINE
+ DEBUG
+ TRACE
+ INFO
+ WARNING
+ ERROR
+ CRITICAL
+)
+
+// Logging level strings
+var (
+ levelStrings = [...]string{"FNST", "FINE", "DEBG", "TRAC", "INFO", "WARN", "EROR", "CRIT"}
+)
+
+func (l Level) String() string {
+ if l < 0 || int(l) >= len(levelStrings) {
+ return "UNKNOWN"
+ }
+ return levelStrings[int(l)]
+}
+
+/****** Variables ******/
+var (
+ // LogBufferLength specifies how many log messages a particular log4go
+ // logger can buffer at a time before writing them.
+ LogBufferLength = 32
+)
+
+/****** LogRecord ******/
+
+// A LogRecord contains all of the pertinent information for each message
+type LogRecord struct {
+ Level Level // The log level
+ Created time.Time // The time at which the log message was created (nanoseconds)
+ Source string // The message source
+ Message string // The log message
+}
+
+/****** LogWriter ******/
+
+// This is an interface for anything that should be able to write logs
+type LogWriter interface {
+ // This will be called to log a LogRecord message.
+ LogWrite(rec *LogRecord)
+
+ // This should clean up anything lingering about the LogWriter, as it is called before
+ // the LogWriter is removed. LogWrite should not be called after Close.
+ Close()
+}
+
+/****** Logger ******/
+
+// A Filter represents the log level below which no log records are written to
+// the associated LogWriter.
+type Filter struct {
+ Level Level
+ LogWriter
+}
+
+// A Logger represents a collection of Filters through which log messages are
+// written.
+type Logger map[string]*Filter
+
+// Create a new logger.
+//
+// DEPRECATED: Use make(Logger) instead.
+func NewLogger() Logger {
+ os.Stderr.WriteString("warning: use of deprecated NewLogger\n")
+ return make(Logger)
+}
+
+// Create a new logger with a "stdout" filter configured to send log messages at
+// or above lvl to standard output.
+//
+// DEPRECATED: use NewDefaultLogger instead.
+func NewConsoleLogger(lvl Level) Logger {
+ os.Stderr.WriteString("warning: use of deprecated NewConsoleLogger\n")
+ return Logger{
+ "stdout": &Filter{lvl, NewConsoleLogWriter()},
+ }
+}
+
+// Create a new logger with a "stdout" filter configured to send log messages at
+// or above lvl to standard output.
+func NewDefaultLogger(lvl Level) Logger {
+ return Logger{
+ "stdout": &Filter{lvl, NewConsoleLogWriter()},
+ }
+}
+
+// Closes all log writers in preparation for exiting the program or a
+// reconfiguration of logging. Calling this is not really imperative, unless
+// you want to guarantee that all log messages are written. Close removes
+// all filters (and thus all LogWriters) from the logger.
+func (log Logger) Close() {
+ // Close all open loggers
+ for name, filt := range log {
+ filt.Close()
+ delete(log, name)
+ }
+}
+
+// Add a new LogWriter to the Logger which will only log messages at lvl or
+// higher. This function should not be called from multiple goroutines.
+// Returns the logger for chaining.
+func (log Logger) AddFilter(name string, lvl Level, writer LogWriter) Logger {
+ log[name] = &Filter{lvl, writer}
+ return log
+}
+
+/******* Logging *******/
+// Send a formatted log message internally
+func (log Logger) intLogf(lvl Level, format string, args ...interface{}) {
+ skip := true
+
+ // Determine if any logging will be done
+ for _, filt := range log {
+ if lvl >= filt.Level {
+ skip = false
+ break
+ }
+ }
+ if skip {
+ return
+ }
+
+ // Determine caller func
+ pc, _, lineno, ok := runtime.Caller(2)
+ src := ""
+ if ok {
+ src = fmt.Sprintf("%s:%d", runtime.FuncForPC(pc).Name(), lineno)
+ }
+
+ msg := format
+ if len(args) > 0 {
+ msg = fmt.Sprintf(format, args...)
+ }
+
+ // Make the log record
+ rec := &LogRecord{
+ Level: lvl,
+ Created: time.Now(),
+ Source: src,
+ Message: msg,
+ }
+
+ // Dispatch the logs
+ for _, filt := range log {
+ if lvl < filt.Level {
+ continue
+ }
+ filt.LogWrite(rec)
+ }
+}
+
+// Send a closure log message internally
+func (log Logger) intLogc(lvl Level, closure func() string) {
+ skip := true
+
+ // Determine if any logging will be done
+ for _, filt := range log {
+ if lvl >= filt.Level {
+ skip = false
+ break
+ }
+ }
+ if skip {
+ return
+ }
+
+ // Determine caller func
+ pc, _, lineno, ok := runtime.Caller(2)
+ src := ""
+ if ok {
+ src = fmt.Sprintf("%s:%d", runtime.FuncForPC(pc).Name(), lineno)
+ }
+
+ // Make the log record
+ rec := &LogRecord{
+ Level: lvl,
+ Created: time.Now(),
+ Source: src,
+ Message: closure(),
+ }
+
+ // Dispatch the logs
+ for _, filt := range log {
+ if lvl < filt.Level {
+ continue
+ }
+ filt.LogWrite(rec)
+ }
+}
+
+// Send a log message with manual level, source, and message.
+func (log Logger) Log(lvl Level, source, message string) {
+ skip := true
+
+ // Determine if any logging will be done
+ for _, filt := range log {
+ if lvl >= filt.Level {
+ skip = false
+ break
+ }
+ }
+ if skip {
+ return
+ }
+
+ // Make the log record
+ rec := &LogRecord{
+ Level: lvl,
+ Created: time.Now(),
+ Source: source,
+ Message: message,
+ }
+
+ // Dispatch the logs
+ for _, filt := range log {
+ if lvl < filt.Level {
+ continue
+ }
+ filt.LogWrite(rec)
+ }
+}
+
+// Logf logs a formatted log message at the given log level, using the caller as
+// its source.
+func (log Logger) Logf(lvl Level, format string, args ...interface{}) {
+ log.intLogf(lvl, format, args...)
+}
+
+// Logc logs a string returned by the closure at the given log level, using the caller as
+// its source. If no log message would be written, the closure is never called.
+func (log Logger) Logc(lvl Level, closure func() string) {
+ log.intLogc(lvl, closure)
+}
+
+// Finest logs a message at the finest log level.
+// See Debug for an explanation of the arguments.
+func (log Logger) Finest(arg0 interface{}, args ...interface{}) {
+ const (
+ lvl = FINEST
+ )
+ switch first := arg0.(type) {
+ case string:
+ // Use the string as a format string
+ log.intLogf(lvl, first, args...)
+ case func() string:
+ // Log the closure (no other arguments used)
+ log.intLogc(lvl, first)
+ default:
+ // Build a format string so that it will be similar to Sprint
+ log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
+ }
+}
+
+// Fine logs a message at the fine log level.
+// See Debug for an explanation of the arguments.
+func (log Logger) Fine(arg0 interface{}, args ...interface{}) {
+ const (
+ lvl = FINE
+ )
+ switch first := arg0.(type) {
+ case string:
+ // Use the string as a format string
+ log.intLogf(lvl, first, args...)
+ case func() string:
+ // Log the closure (no other arguments used)
+ log.intLogc(lvl, first)
+ default:
+ // Build a format string so that it will be similar to Sprint
+ log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
+ }
+}
+
+// Debug is a utility method for debug log messages.
+// The behavior of Debug depends on the first argument:
+// - arg0 is a string
+// When given a string as the first argument, this behaves like Logf but with
+// the DEBUG log level: the first argument is interpreted as a format for the
+// latter arguments.
+// - arg0 is a func()string
+// When given a closure of type func()string, this logs the string returned by
+// the closure iff it will be logged. The closure runs at most one time.
+// - arg0 is interface{}
+// When given anything else, the log message will be each of the arguments
+// formatted with %v and separated by spaces (ala Sprint).
+func (log Logger) Debug(arg0 interface{}, args ...interface{}) {
+ const (
+ lvl = DEBUG
+ )
+ switch first := arg0.(type) {
+ case string:
+ // Use the string as a format string
+ log.intLogf(lvl, first, args...)
+ case func() string:
+ // Log the closure (no other arguments used)
+ log.intLogc(lvl, first)
+ default:
+ // Build a format string so that it will be similar to Sprint
+ log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
+ }
+}
+
+// Trace logs a message at the trace log level.
+// See Debug for an explanation of the arguments.
+func (log Logger) Trace(arg0 interface{}, args ...interface{}) {
+ const (
+ lvl = TRACE
+ )
+ switch first := arg0.(type) {
+ case string:
+ // Use the string as a format string
+ log.intLogf(lvl, first, args...)
+ case func() string:
+ // Log the closure (no other arguments used)
+ log.intLogc(lvl, first)
+ default:
+ // Build a format string so that it will be similar to Sprint
+ log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
+ }
+}
+
+// Info logs a message at the info log level.
+// See Debug for an explanation of the arguments.
+func (log Logger) Info(arg0 interface{}, args ...interface{}) {
+ const (
+ lvl = INFO
+ )
+ switch first := arg0.(type) {
+ case string:
+ // Use the string as a format string
+ log.intLogf(lvl, first, args...)
+ case func() string:
+ // Log the closure (no other arguments used)
+ log.intLogc(lvl, first)
+ default:
+ // Build a format string so that it will be similar to Sprint
+ log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
+ }
+}
+
+// Warn logs a message at the warning log level and returns the formatted error.
+// At the warning level and higher, there is no performance benefit if the
+// message is not actually logged, because all formats are processed and all
+// closures are executed to format the error message.
+// See Debug for further explanation of the arguments.
+func (log Logger) Warn(arg0 interface{}, args ...interface{}) error {
+ const (
+ lvl = WARNING
+ )
+ var msg string
+ switch first := arg0.(type) {
+ case string:
+ // Use the string as a format string
+ msg = fmt.Sprintf(first, args...)
+ case func() string:
+ // Log the closure (no other arguments used)
+ msg = first()
+ default:
+ // Build a format string so that it will be similar to Sprint
+ msg = fmt.Sprintf(fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...)
+ }
+ log.intLogf(lvl, msg)
+ return errors.New(msg)
+}
+
+// Error logs a message at the error log level and returns the formatted error,
+// See Warn for an explanation of the performance and Debug for an explanation
+// of the parameters.
+func (log Logger) Error(arg0 interface{}, args ...interface{}) error {
+ const (
+ lvl = ERROR
+ )
+ var msg string
+ switch first := arg0.(type) {
+ case string:
+ // Use the string as a format string
+ msg = fmt.Sprintf(first, args...)
+ case func() string:
+ // Log the closure (no other arguments used)
+ msg = first()
+ default:
+ // Build a format string so that it will be similar to Sprint
+ msg = fmt.Sprintf(fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...)
+ }
+ log.intLogf(lvl, msg)
+ return errors.New(msg)
+}
+
+// Critical logs a message at the critical log level and returns the formatted error,
+// See Warn for an explanation of the performance and Debug for an explanation
+// of the parameters.
+func (log Logger) Critical(arg0 interface{}, args ...interface{}) error {
+ const (
+ lvl = CRITICAL
+ )
+ var msg string
+ switch first := arg0.(type) {
+ case string:
+ // Use the string as a format string
+ msg = fmt.Sprintf(first, args...)
+ case func() string:
+ // Log the closure (no other arguments used)
+ msg = first()
+ default:
+ // Build a format string so that it will be similar to Sprint
+ msg = fmt.Sprintf(fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...)
+ }
+ log.intLogf(lvl, msg)
+ return errors.New(msg)
+}
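
The Debug documentation above describes three call forms (format string, closure, and Sprint-style arguments); here is a minimal sketch of all three, plus the error value returned by Warn, assuming the import path used by the bundled examples:

    package main

    import (
        "errors"
        "fmt"

        l4g "code.google.com/p/log4go"
    )

    func main() {
        log := l4g.NewDefaultLogger(l4g.DEBUG)
        defer log.Close()

        // 1) string: used as a format string for the remaining arguments
        log.Debug("loaded %d items in %s", 3, "42ms")

        // 2) closure: evaluated only if the message will actually be logged
        log.Debug(func() string { return "expensive diagnostic " + fmt.Sprint(3*14) })

        // 3) anything else: arguments joined Sprint-style with %v
        log.Debug(errors.New("boom"), 1, 2, 3)

        // Warn, Error and Critical also return the formatted message as an error
        if err := log.Warn("disk usage at %d%%", 93); err != nil {
            fmt.Println("returned:", err)
        }
    }
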
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/pattlog.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/pattlog.go
new file mode 100644
index 00000000..98632e4d
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/pattlog.go
@@ -0,0 +1,130 @@
+// Copyright (C) 2010, Kyle Lemons <kyle@kylelemons.net>. All rights reserved.
+
+package log4go
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+)
+
+const (
+ FORMAT_DEFAULT = "[%D %T] [%L] (%S) %M"
+ FORMAT_SHORT = "[%t %d] [%L] %M"
+ FORMAT_ABBREV = "[%L] %M"
+)
+
+type formatCacheType struct {
+ LastUpdateSeconds int64
+ shortTime, shortDate string
+ longTime, longDate string
+}
+
+var formatCache = &formatCacheType{}
+var mutex sync.Mutex
+
+// Known format codes:
+// %T - Time (15:04:05 MST)
+// %t - Time (15:04)
+// %D - Date (2006/01/02)
+// %d - Date (01/02/06)
+// %L - Level (FNST, FINE, DEBG, TRAC, INFO, WARN, EROR, CRIT)
+// %S - Source (use %s for just the part after the last "/")
+// %M - Message
+// Ignores unknown formats
+// Recommended: "[%D %T] [%L] (%S) %M"
+func FormatLogRecord(format string, rec *LogRecord) string {
+ if rec == nil {
+ return "<nil>"
+ }
+ if len(format) == 0 {
+ return ""
+ }
+
+ out := bytes.NewBuffer(make([]byte, 0, 64))
+ secs := rec.Created.UnixNano() / 1e9
+
+ mutex.Lock()
+ cache := *formatCache
+ if cache.LastUpdateSeconds != secs {
+ month, day, year := rec.Created.Month(), rec.Created.Day(), rec.Created.Year()
+ hour, minute, second := rec.Created.Hour(), rec.Created.Minute(), rec.Created.Second()
+ zone, _ := rec.Created.Zone()
+ updated := &formatCacheType{
+ LastUpdateSeconds: secs,
+ shortTime: fmt.Sprintf("%02d:%02d", hour, minute),
+ shortDate: fmt.Sprintf("%02d/%02d/%02d", day, month, year%100),
+ longTime: fmt.Sprintf("%02d:%02d:%02d %s", hour, minute, second, zone),
+ longDate: fmt.Sprintf("%04d/%02d/%02d", year, month, day),
+ }
+ cache = *updated
+ formatCache = updated
+ }
+ mutex.Unlock()
+
+ // Split the string into pieces by % signs
+ pieces := bytes.Split([]byte(format), []byte{'%'})
+
+ // Iterate over the pieces, replacing known formats
+ for i, piece := range pieces {
+ if i > 0 && len(piece) > 0 {
+ switch piece[0] {
+ case 'T':
+ out.WriteString(cache.longTime)
+ case 't':
+ out.WriteString(cache.shortTime)
+ case 'D':
+ out.WriteString(cache.longDate)
+ case 'd':
+ out.WriteString(cache.shortDate)
+ case 'L':
+ out.WriteString(levelStrings[rec.Level])
+ case 'S':
+ out.WriteString(rec.Source)
+ case 's':
+ slice := strings.Split(rec.Source, "/")
+ out.WriteString(slice[len(slice)-1])
+ case 'M':
+ out.WriteString(rec.Message)
+ }
+ if len(piece) > 1 {
+ out.Write(piece[1:])
+ }
+ } else if len(piece) > 0 {
+ out.Write(piece)
+ }
+ }
+ out.WriteByte('\n')
+
+ return out.String()
+}
+
+// This is the standard writer that prints to standard output.
+type FormatLogWriter chan *LogRecord
+
+// This creates a new FormatLogWriter
+func NewFormatLogWriter(out io.Writer, format string) FormatLogWriter {
+ records := make(FormatLogWriter, LogBufferLength)
+ go records.run(out, format)
+ return records
+}
+
+func (w FormatLogWriter) run(out io.Writer, format string) {
+ for rec := range w {
+ fmt.Fprint(out, FormatLogRecord(format, rec))
+ }
+}
+
+// This is the FormatLogWriter's output method. This will block if the output
+// buffer is full.
+func (w FormatLogWriter) LogWrite(rec *LogRecord) {
+ w <- rec
+}
+
+// Close stops the logger from sending messages to standard output. Attempts to
+// send log messages to this logger after a Close have undefined behavior.
+func (w FormatLogWriter) Close() {
+ close(w)
+}
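
A minimal sketch of FormatLogRecord with the format codes documented above, using the exported FORMAT_DEFAULT constant; the source string and the sample output line are illustrative:

    package main

    import (
        "fmt"
        "time"

        l4g "code.google.com/p/log4go"
    )

    func main() {
        rec := &l4g.LogRecord{
            Level:   l4g.INFO,
            Created: time.Now(),
            Source:  "main.main:12",
            Message: "hello",
        }
        // With "[%D %T] [%L] (%S) %M" this prints something like:
        // [2009/11/10 23:00:00 UTC] [INFO] (main.main:12) hello
        fmt.Print(l4g.FormatLogRecord(l4g.FORMAT_DEFAULT, rec))
    }
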
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/socklog.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/socklog.go
new file mode 100644
index 00000000..1d224a99
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/socklog.go
@@ -0,0 +1,57 @@
+// Copyright (C) 2010, Kyle Lemons <kyle@kylelemons.net>. All rights reserved.
+
+package log4go
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "os"
+)
+
+// This log writer sends output to a socket
+type SocketLogWriter chan *LogRecord
+
+// This is the SocketLogWriter's output method
+func (w SocketLogWriter) LogWrite(rec *LogRecord) {
+ w <- rec
+}
+
+func (w SocketLogWriter) Close() {
+ close(w)
+}
+
+func NewSocketLogWriter(proto, hostport string) SocketLogWriter {
+ sock, err := net.Dial(proto, hostport)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "NewSocketLogWriter(%q): %s\n", hostport, err)
+ return nil
+ }
+
+ w := SocketLogWriter(make(chan *LogRecord, LogBufferLength))
+
+ go func() {
+ defer func() {
+ if sock != nil && proto == "tcp" {
+ sock.Close()
+ }
+ }()
+
+ for rec := range w {
+ // Marshall into JSON
+ js, err := json.Marshal(rec)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "SocketLogWriter(%q): %s", hostport, err)
+ return
+ }
+
+ _, err = sock.Write(js)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "SocketLogWriter(%q): %s", hostport, err)
+ return
+ }
+ }
+ }()
+
+ return w
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/termlog.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/termlog.go
new file mode 100644
index 00000000..8a941e26
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/termlog.go
@@ -0,0 +1,49 @@
+// Copyright (C) 2010, Kyle Lemons <kyle@kylelemons.net>. All rights reserved.
+
+package log4go
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "time"
+)
+
+var stdout io.Writer = os.Stdout
+
+// This is the standard writer that prints to standard output.
+type ConsoleLogWriter struct {
+ format string
+ w chan *LogRecord
+}
+
+// This creates a new ConsoleLogWriter
+func NewConsoleLogWriter() *ConsoleLogWriter {
+ consoleWriter := &ConsoleLogWriter{
+ format: "[%T %D] [%L] (%S) %M",
+ w: make(chan *LogRecord, LogBufferLength),
+ }
+ go consoleWriter.run(stdout)
+ return consoleWriter
+}
+func (c *ConsoleLogWriter) SetFormat(format string) {
+ c.format = format
+}
+func (c *ConsoleLogWriter) run(out io.Writer) {
+ for rec := range c.w {
+ fmt.Fprint(out, FormatLogRecord(c.format, rec))
+ }
+}
+
+// This is the ConsoleLogWriter's output method. This will block if the output
+// buffer is full.
+func (c *ConsoleLogWriter) LogWrite(rec *LogRecord) {
+ c.w <- rec
+}
+
+// Close stops the logger from sending messages to standard output. Attempts to
+// send log messages to this logger after a Close have undefined behavior.
+func (c *ConsoleLogWriter) Close() {
+ close(c.w)
+ time.Sleep(50 * time.Millisecond) // Try to give console I/O time to complete
+}
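
This ConsoleLogWriter exposes a SetFormat method; a minimal sketch of using it with the abbreviated format constant from pattlog.go (the message text is illustrative):

    package main

    import l4g "code.google.com/p/log4go"

    func main() {
        cw := l4g.NewConsoleLogWriter()
        cw.SetFormat(l4g.FORMAT_ABBREV) // "[%L] %M", no timestamps

        log := make(l4g.Logger)
        defer log.Close()
        log.AddFilter("stdout", l4g.DEBUG, cw)
        log.Info("hello") // printed as: [INFO] hello
    }
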
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/wrapper.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/wrapper.go
new file mode 100644
index 00000000..2ae222b0
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/wrapper.go
@@ -0,0 +1,278 @@
+// Copyright (C) 2010, Kyle Lemons <kyle@kylelemons.net>. All rights reserved.
+
+package log4go
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+)
+
+var (
+ Global Logger
+)
+
+func init() {
+ Global = NewDefaultLogger(DEBUG)
+}
+
+// Wrapper for (*Logger).LoadConfiguration
+func LoadConfiguration(filename string) {
+ Global.LoadConfiguration(filename)
+}
+
+// Wrapper for (*Logger).AddFilter
+func AddFilter(name string, lvl Level, writer LogWriter) {
+ Global.AddFilter(name, lvl, writer)
+}
+
+// Wrapper for (*Logger).Close (closes and removes all logwriters)
+func Close() {
+ Global.Close()
+}
+
+func Crash(args ...interface{}) {
+ if len(args) > 0 {
+ Global.intLogf(CRITICAL, strings.Repeat(" %v", len(args))[1:], args...)
+ }
+ panic(args)
+}
+
+// Logs the given message and crashes the program
+func Crashf(format string, args ...interface{}) {
+ Global.intLogf(CRITICAL, format, args...)
+ Global.Close() // so that hopefully the messages get logged
+ panic(fmt.Sprintf(format, args...))
+}
+
+// Compatibility with `log`
+func Exit(args ...interface{}) {
+ if len(args) > 0 {
+ Global.intLogf(ERROR, strings.Repeat(" %v", len(args))[1:], args...)
+ }
+ Global.Close() // so that hopefully the messages get logged
+ os.Exit(0)
+}
+
+// Compatibility with `log`
+func Exitf(format string, args ...interface{}) {
+ Global.intLogf(ERROR, format, args...)
+ Global.Close() // so that hopefully the messages get logged
+ os.Exit(0)
+}
+
+// Compatibility with `log`
+func Stderr(args ...interface{}) {
+ if len(args) > 0 {
+ Global.intLogf(ERROR, strings.Repeat(" %v", len(args))[1:], args...)
+ }
+}
+
+// Compatibility with `log`
+func Stderrf(format string, args ...interface{}) {
+ Global.intLogf(ERROR, format, args...)
+}
+
+// Compatibility with `log`
+func Stdout(args ...interface{}) {
+ if len(args) > 0 {
+ Global.intLogf(INFO, strings.Repeat(" %v", len(args))[1:], args...)
+ }
+}
+
+// Compatibility with `log`
+func Stdoutf(format string, args ...interface{}) {
+ Global.intLogf(INFO, format, args...)
+}
+
+// Send a log message manually
+// Wrapper for (*Logger).Log
+func Log(lvl Level, source, message string) {
+ Global.Log(lvl, source, message)
+}
+
+// Send a formatted log message easily
+// Wrapper for (*Logger).Logf
+func Logf(lvl Level, format string, args ...interface{}) {
+ Global.intLogf(lvl, format, args...)
+}
+
+// Send a closure log message
+// Wrapper for (*Logger).Logc
+func Logc(lvl Level, closure func() string) {
+ Global.intLogc(lvl, closure)
+}
+
+// Utility for finest log messages (see Debug() for parameter explanation)
+// Wrapper for (*Logger).Finest
+func Finest(arg0 interface{}, args ...interface{}) {
+ const (
+ lvl = FINEST
+ )
+ switch first := arg0.(type) {
+ case string:
+ // Use the string as a format string
+ Global.intLogf(lvl, first, args...)
+ case func() string:
+ // Log the closure (no other arguments used)
+ Global.intLogc(lvl, first)
+ default:
+ // Build a format string so that it will be similar to Sprint
+ Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
+ }
+}
+
+// Utility for fine log messages (see Debug() for parameter explanation)
+// Wrapper for (*Logger).Fine
+func Fine(arg0 interface{}, args ...interface{}) {
+ const (
+ lvl = FINE
+ )
+ switch first := arg0.(type) {
+ case string:
+ // Use the string as a format string
+ Global.intLogf(lvl, first, args...)
+ case func() string:
+ // Log the closure (no other arguments used)
+ Global.intLogc(lvl, first)
+ default:
+ // Build a format string so that it will be similar to Sprint
+ Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
+ }
+}
+
+// Utility for debug log messages
+// When given a string as the first argument, this behaves like Logf but with the DEBUG log level (e.g. the first argument is interpreted as a format for the latter arguments)
+// When given a closure of type func()string, this logs the string returned by the closure iff it will be logged. The closure runs at most one time.
+// When given anything else, the log message will be each of the arguments formatted with %v and separated by spaces (ala Sprint).
+// Wrapper for (*Logger).Debug
+func Debug(arg0 interface{}, args ...interface{}) {
+ const (
+ lvl = DEBUG
+ )
+ switch first := arg0.(type) {
+ case string:
+ // Use the string as a format string
+ Global.intLogf(lvl, first, args...)
+ case func() string:
+ // Log the closure (no other arguments used)
+ Global.intLogc(lvl, first)
+ default:
+ // Build a format string so that it will be similar to Sprint
+ Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
+ }
+}
+
+// Utility for trace log messages (see Debug() for parameter explanation)
+// Wrapper for (*Logger).Trace
+func Trace(arg0 interface{}, args ...interface{}) {
+ const (
+ lvl = TRACE
+ )
+ switch first := arg0.(type) {
+ case string:
+ // Use the string as a format string
+ Global.intLogf(lvl, first, args...)
+ case func() string:
+ // Log the closure (no other arguments used)
+ Global.intLogc(lvl, first)
+ default:
+ // Build a format string so that it will be similar to Sprint
+ Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
+ }
+}
+
+// Utility for info log messages (see Debug() for parameter explanation)
+// Wrapper for (*Logger).Info
+func Info(arg0 interface{}, args ...interface{}) {
+ const (
+ lvl = INFO
+ )
+ switch first := arg0.(type) {
+ case string:
+ // Use the string as a format string
+ Global.intLogf(lvl, first, args...)
+ case func() string:
+ // Log the closure (no other arguments used)
+ Global.intLogc(lvl, first)
+ default:
+ // Build a format string so that it will be similar to Sprint
+ Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...)
+ }
+}
+
+// Utility for warn log messages (returns an error for easy function returns) (see Debug() for parameter explanation)
+// These functions will execute a closure exactly once, to build the error message for the return
+// Wrapper for (*Logger).Warn
+func Warn(arg0 interface{}, args ...interface{}) error {
+ const (
+ lvl = WARNING
+ )
+ switch first := arg0.(type) {
+ case string:
+ // Use the string as a format string
+ Global.intLogf(lvl, first, args...)
+ return errors.New(fmt.Sprintf(first, args...))
+ case func() string:
+ // Log the closure (no other arguments used)
+ str := first()
+ Global.intLogf(lvl, "%s", str)
+ return errors.New(str)
+ default:
+ // Build a format string so that it will be similar to Sprint
+ Global.intLogf(lvl, fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...)
+ return errors.New(fmt.Sprint(first) + fmt.Sprintf(strings.Repeat(" %v", len(args)), args...))
+ }
+ return nil
+}
+
+// Utility for error log messages (returns an error for easy function returns) (see Debug() for parameter explanation)
+// These functions will execute a closure exactly once, to build the error message for the return
+// Wrapper for (*Logger).Error
+func Error(arg0 interface{}, args ...interface{}) error {
+ const (
+ lvl = ERROR
+ )
+ switch first := arg0.(type) {
+ case string:
+ // Use the string as a format string
+ Global.intLogf(lvl, first, args...)
+ return errors.New(fmt.Sprintf(first, args...))
+ case func() string:
+ // Log the closure (no other arguments used)
+ str := first()
+ Global.intLogf(lvl, "%s", str)
+ return errors.New(str)
+ default:
+ // Build a format string so that it will be similar to Sprint
+ Global.intLogf(lvl, fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...)
+ return errors.New(fmt.Sprint(first) + fmt.Sprintf(strings.Repeat(" %v", len(args)), args...))
+ }
+ return nil
+}
+
+// Utility for critical log messages (returns an error for easy function returns) (see Debug() for parameter explanation)
+// These functions will execute a closure exactly once, to build the error message for the return
+// Wrapper for (*Logger).Critical
+func Critical(arg0 interface{}, args ...interface{}) error {
+ const (
+ lvl = CRITICAL
+ )
+ switch first := arg0.(type) {
+ case string:
+ // Use the string as a format string
+ Global.intLogf(lvl, first, args...)
+ return errors.New(fmt.Sprintf(first, args...))
+ case func() string:
+ // Log the closure (no other arguments used)
+ str := first()
+ Global.intLogf(lvl, "%s", str)
+ return errors.New(str)
+ default:
+ // Build a format string so that it will be similar to Sprint
+ Global.intLogf(lvl, fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...)
+ return errors.New(fmt.Sprint(first) + fmt.Sprintf(strings.Repeat(" %v", len(args)), args...))
+ }
+ return nil
+}
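
The wrapper comments above describe Warn, Error and Critical as returning an error for easy function returns; here is a minimal sketch of that pattern through the package-level Global logger (the file name is a hypothetical stand-in):

    package main

    import (
        "fmt"
        "os"

        l4g "code.google.com/p/log4go"
    )

    // openConfig logs through the Global logger and hands the same formatted
    // message back to the caller as an error value.
    func openConfig(path string) (*os.File, error) {
        f, err := os.Open(path)
        if err != nil {
            return nil, l4g.Error("openConfig(%q): %v", path, err)
        }
        return f, nil
    }

    func main() {
        defer l4g.Close()
        if _, err := openConfig("does-not-exist.json"); err != nil {
            fmt.Println("caller saw:", err)
        }
    }
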
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/LICENSE.txt
new file mode 100644
index 00000000..ead98cf0
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/LICENSE.txt
@@ -0,0 +1,897 @@
+Mattermost Licensing
+
+SOFTWARE LICENSING
+
+You are licensed to use compiled versions of the Mattermost platform produced by Mattermost, Inc. under an MIT LICENSE
+
+- See MIT-COMPILED-LICENSE.md included in compiled versions for details
+
+You may be licensed to use source code to create compiled versions not produced by Mattermost, Inc. in one of two ways:
+
+1. Under the Free Software Foundation’s GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or
+2. Under a commercial license available from Mattermost, Inc. by contacting commercial@mattermost.com
+
+You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/, model/,
+webapp/client, webapp/fonts, webapp/i18n, webapp/images and all subdirectories thereof) under the Apache License v2.0.
+
+We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not
+link to the Mattermost Platform directly, but exclusively uses the Mattermost Admin Tools and Configuration Files, and
+(b) you have not modified, added to or adapted the source code of Mattermost in a way that results in the creation of
+a “modified version” or “work based on” Mattermost as these terms are defined in the AGPL v3.0 license.
+
+MATTERMOST TRADEMARK GUIDELINES
+
+Your use of the mark Mattermost is subject to Mattermost, Inc's prior written approval and our organization’s Trademark
+Standards of Use at http://www.mattermost.org/trademark-standards-of-use/. For trademark approval or any questions
+you have about using these trademarks, please email trademark@mattermost.com
+
+------------------------------------------------------------------------------------------------------------------------------
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+------------------------------------------------------------------------------
+
+The software is released under the terms of the GNU Affero General Public
+License, version 3.
+
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/add.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/add.go
new file mode 100644
index 00000000..0e5f6cdb
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/add.go
@@ -0,0 +1,113 @@
+//
+// https://tools.ietf.org/html/rfc4511
+//
+// AddRequest ::= [APPLICATION 8] SEQUENCE {
+// entry LDAPDN,
+// attributes AttributeList }
+//
+// AttributeList ::= SEQUENCE OF attribute Attribute
+
+package ldap
+
+import (
+ "errors"
+ "log"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// Attribute represents an LDAP attribute
+type Attribute struct {
+ // Type is the name of the LDAP attribute
+ Type string
+ // Vals are the LDAP attribute values
+ Vals []string
+}
+
+func (a *Attribute) encode() *ber.Packet {
+ seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attribute")
+ seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.Type, "Type"))
+ set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue")
+ for _, value := range a.Vals {
+ set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals"))
+ }
+ seq.AppendChild(set)
+ return seq
+}
+
+// AddRequest represents an LDAP AddRequest operation
+type AddRequest struct {
+ // DN identifies the entry being added
+ DN string
+ // Attributes list the attributes of the new entry
+ Attributes []Attribute
+}
+
+func (a AddRequest) encode() *ber.Packet {
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationAddRequest, nil, "Add Request")
+ request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.DN, "DN"))
+ attributes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes")
+ for _, attribute := range a.Attributes {
+ attributes.AppendChild(attribute.encode())
+ }
+ request.AppendChild(attributes)
+ return request
+}
+
+// Attribute adds an attribute with the given type and values
+func (a *AddRequest) Attribute(attrType string, attrVals []string) {
+ a.Attributes = append(a.Attributes, Attribute{Type: attrType, Vals: attrVals})
+}
+
+// NewAddRequest returns an AddRequest for the given DN, with no attributes
+func NewAddRequest(dn string) *AddRequest {
+ return &AddRequest{
+ DN: dn,
+ }
+
+}
+
+// Add performs the given AddRequest
+func (l *Conn) Add(addRequest *AddRequest) error {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+ packet.AppendChild(addRequest.encode())
+
+ l.Debug.PrintPacket(packet)
+
+ msgCtx, err := l.sendMessage(packet)
+ if err != nil {
+ return err
+ }
+ defer l.finishMessage(msgCtx)
+
+ l.Debug.Printf("%d: waiting for response", msgCtx.id)
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return err
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ return err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ if packet.Children[1].Tag == ApplicationAddResponse {
+ resultCode, resultDescription := getLDAPResultCode(packet)
+ if resultCode != 0 {
+ return NewError(resultCode, errors.New(resultDescription))
+ }
+ } else {
+ log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
+ }
+
+ l.Debug.Printf("%d: returning", msgCtx.id)
+ return nil
+}
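
The file above gives AddRequest a small builder-style API (NewAddRequest, Attribute, then Conn.Add). A minimal, illustrative sketch of driving it, assuming the upstream import path github.com/go-ldap/ldap (the copy here is vendored under mattermost-server) and placeholder server address, DN and attribute values:

package main

import (
	"log"

	"github.com/go-ldap/ldap"
)

func main() {
	// Placeholder address; substitute a real directory server.
	conn, err := ldap.Dial("tcp", "ldap.example.com:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Build the request for a new entry, then attach its attributes.
	req := ldap.NewAddRequest("cn=jdoe,ou=people,dc=example,dc=com")
	req.Attribute("objectClass", []string{"inetOrgPerson"})
	req.Attribute("cn", []string{"jdoe"})
	req.Attribute("sn", []string{"Doe"})

	// Add encodes the request, sends it, and turns a non-zero LDAP result
	// code into an error.
	if err := conn.Add(req); err != nil {
		log.Fatal(err)
	}
}
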
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/atomic_value.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/atomic_value.go
new file mode 100644
index 00000000..bccf7573
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/atomic_value.go
@@ -0,0 +1,13 @@
+// +build go1.4
+
+package ldap
+
+import (
+ "sync/atomic"
+)
+
+// For compilers that support it, we just use the underlying sync/atomic.Value
+// type.
+type atomicValue struct {
+ atomic.Value
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/atomic_value_go13.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/atomic_value_go13.go
new file mode 100644
index 00000000..04920bb2
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/atomic_value_go13.go
@@ -0,0 +1,28 @@
+// +build !go1.4
+
+package ldap
+
+import (
+ "sync"
+)
+
+// This is a helper type that emulates the use of the "sync/atomic.Value"
+// struct that's available in Go 1.4 and up.
+type atomicValue struct {
+ value interface{}
+ lock sync.RWMutex
+}
+
+func (av *atomicValue) Store(val interface{}) {
+ av.lock.Lock()
+ av.value = val
+ av.lock.Unlock()
+}
+
+func (av *atomicValue) Load() interface{} {
+ av.lock.RLock()
+ ret := av.value
+ av.lock.RUnlock()
+
+ return ret
+}
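
Both build-tagged variants expose the same Store/Load pair, so the rest of the package (for example the closeErr field on Conn, further below) can record a value once and read it from other goroutines without caring which implementation was compiled in. The go1.4+ variant simply embeds the standard sync/atomic.Value; a tiny self-contained sketch of that underlying pattern, using the standard-library type directly rather than the unexported wrapper:

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

func main() {
	var v atomic.Value

	// One goroutine records the terminal error...
	v.Store(errors.New("ldap: connection closed"))

	// ...and any other goroutine can observe it later without extra locking.
	if err, ok := v.Load().(error); ok {
		fmt.Println(err)
	}
}
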
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/bind.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/bind.go
new file mode 100644
index 00000000..26b3cc72
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/bind.go
@@ -0,0 +1,143 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ldap
+
+import (
+ "errors"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// SimpleBindRequest represents a username/password bind operation
+type SimpleBindRequest struct {
+ // Username is the name of the Directory object that the client wishes to bind as
+ Username string
+ // Password is the credentials to bind with
+ Password string
+ // Controls are optional controls to send with the bind request
+ Controls []Control
+}
+
+// SimpleBindResult contains the response from the server
+type SimpleBindResult struct {
+ Controls []Control
+}
+
+// NewSimpleBindRequest returns a bind request
+func NewSimpleBindRequest(username string, password string, controls []Control) *SimpleBindRequest {
+ return &SimpleBindRequest{
+ Username: username,
+ Password: password,
+ Controls: controls,
+ }
+}
+
+func (bindRequest *SimpleBindRequest) encode() *ber.Packet {
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
+ request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
+ request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, bindRequest.Username, "User Name"))
+ request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, bindRequest.Password, "Password"))
+
+ request.AppendChild(encodeControls(bindRequest.Controls))
+
+ return request
+}
+
+// SimpleBind performs the simple bind operation defined in the given request
+func (l *Conn) SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error) {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+ encodedBindRequest := simpleBindRequest.encode()
+ packet.AppendChild(encodedBindRequest)
+
+ if l.Debug {
+ ber.PrintPacket(packet)
+ }
+
+ msgCtx, err := l.sendMessage(packet)
+ if err != nil {
+ return nil, err
+ }
+ defer l.finishMessage(msgCtx)
+
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return nil, err
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ return nil, err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ result := &SimpleBindResult{
+ Controls: make([]Control, 0),
+ }
+
+ if len(packet.Children) == 3 {
+ for _, child := range packet.Children[2].Children {
+ result.Controls = append(result.Controls, DecodeControl(child))
+ }
+ }
+
+ resultCode, resultDescription := getLDAPResultCode(packet)
+ if resultCode != 0 {
+ return result, NewError(resultCode, errors.New(resultDescription))
+ }
+
+ return result, nil
+}
+
+// Bind performs a bind with the given username and password
+func (l *Conn) Bind(username, password string) error {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+ bindRequest := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
+ bindRequest.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
+ bindRequest.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, username, "User Name"))
+ bindRequest.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, password, "Password"))
+ packet.AppendChild(bindRequest)
+
+ if l.Debug {
+ ber.PrintPacket(packet)
+ }
+
+ msgCtx, err := l.sendMessage(packet)
+ if err != nil {
+ return err
+ }
+ defer l.finishMessage(msgCtx)
+
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return err
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ return err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ resultCode, resultDescription := getLDAPResultCode(packet)
+ if resultCode != 0 {
+ return NewError(resultCode, errors.New(resultDescription))
+ }
+
+ return nil
+}
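
A typical authentication round-trip with this API is a Dial followed by Bind, or SimpleBind when the response controls matter. A minimal sketch with placeholder address and credentials, again assuming the upstream import path github.com/go-ldap/ldap:

package main

import (
	"log"

	"github.com/go-ldap/ldap"
)

func main() {
	conn, err := ldap.Dial("tcp", "ldap.example.com:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Plain username/password bind; a non-zero LDAP result code is
	// returned as an error.
	if err := conn.Bind("cn=admin,dc=example,dc=com", "secret"); err != nil {
		log.Fatal(err)
	}

	// SimpleBind does the same thing but also hands back any response controls.
	res, err := conn.SimpleBind(ldap.NewSimpleBindRequest("cn=admin,dc=example,dc=com", "secret", nil))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("bind ok, %d response controls", len(res.Controls))
}
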
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/client.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/client.go
new file mode 100644
index 00000000..055b27b5
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/client.go
@@ -0,0 +1,27 @@
+package ldap
+
+import (
+ "crypto/tls"
+ "time"
+)
+
+// Client knows how to interact with an LDAP server
+type Client interface {
+ Start()
+ StartTLS(config *tls.Config) error
+ Close()
+ SetTimeout(time.Duration)
+
+ Bind(username, password string) error
+ SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error)
+
+ Add(addRequest *AddRequest) error
+ Del(delRequest *DelRequest) error
+ Modify(modifyRequest *ModifyRequest) error
+
+ Compare(dn, attribute, value string) (bool, error)
+ PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error)
+
+ Search(searchRequest *SearchRequest) (*SearchResult, error)
+ SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error)
+}
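
Because *Conn satisfies this interface (conn.go below asserts var _ Client = &Conn{}), callers can be written against Client rather than the concrete connection type, which makes them easy to exercise with a stub in tests. A hypothetical sketch; authenticate is not part of the library:

package main

import (
	"log"

	"github.com/go-ldap/ldap"
)

// authenticate is a hypothetical helper: it depends only on the Client
// interface, so a test double can stand in for a live directory server.
func authenticate(c ldap.Client, bindDN, password string) error {
	return c.Bind(bindDN, password)
}

func main() {
	conn, err := ldap.Dial("tcp", "ldap.example.com:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := authenticate(conn, "cn=admin,dc=example,dc=com", "secret"); err != nil {
		log.Fatal(err)
	}
}
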
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/compare.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/compare.go
new file mode 100644
index 00000000..cc6d2af5
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/compare.go
@@ -0,0 +1,85 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// File contains Compare functionality
+//
+// https://tools.ietf.org/html/rfc4511
+//
+// CompareRequest ::= [APPLICATION 14] SEQUENCE {
+// entry LDAPDN,
+// ava AttributeValueAssertion }
+//
+// AttributeValueAssertion ::= SEQUENCE {
+// attributeDesc AttributeDescription,
+// assertionValue AssertionValue }
+//
+// AttributeDescription ::= LDAPString
+// -- Constrained to <attributedescription>
+// -- [RFC4512]
+//
+// AttributeValue ::= OCTET STRING
+//
+
+package ldap
+
+import (
+ "errors"
+ "fmt"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// Compare checks whether the entry identified by dn has an attribute whose value matches value.
+// It returns true on a match, false otherwise, along with any error encountered.
+func (l *Conn) Compare(dn, attribute, value string) (bool, error) {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationCompareRequest, nil, "Compare Request")
+ request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, dn, "DN"))
+
+ ava := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "AttributeValueAssertion")
+ ava.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "AttributeDesc"))
+ ava.AppendChild(ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagOctetString, value, "AssertionValue"))
+ request.AppendChild(ava)
+ packet.AppendChild(request)
+
+ l.Debug.PrintPacket(packet)
+
+ msgCtx, err := l.sendMessage(packet)
+ if err != nil {
+ return false, err
+ }
+ defer l.finishMessage(msgCtx)
+
+ l.Debug.Printf("%d: waiting for response", msgCtx.id)
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return false, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return false, err
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ return false, err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ if packet.Children[1].Tag == ApplicationCompareResponse {
+ resultCode, resultDescription := getLDAPResultCode(packet)
+ if resultCode == LDAPResultCompareTrue {
+ return true, nil
+ } else if resultCode == LDAPResultCompareFalse {
+ return false, nil
+ } else {
+ return false, NewError(resultCode, errors.New(resultDescription))
+ }
+ }
+ return false, fmt.Errorf("Unexpected Response: %d", packet.Children[1].Tag)
+}
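
Compare collapses the three possible protocol outcomes (compareTrue, compareFalse, or an error result code) into a (bool, error) pair. A short usage sketch with placeholder DN and attribute values:

package main

import (
	"log"

	"github.com/go-ldap/ldap"
)

func main() {
	conn, err := ldap.Dial("tcp", "ldap.example.com:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Ask the server whether the entry's mail attribute equals this value.
	matched, err := conn.Compare("cn=jdoe,ou=people,dc=example,dc=com", "mail", "jdoe@example.com")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("attribute matches: %v", matched)
}
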
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/conn.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/conn.go
new file mode 100644
index 00000000..eb28eb47
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/conn.go
@@ -0,0 +1,470 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ldap
+
+import (
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "log"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+const (
+ // MessageQuit causes the processMessages loop to exit
+ MessageQuit = 0
+ // MessageRequest sends a request to the server
+ MessageRequest = 1
+ // MessageResponse receives a response from the server
+ MessageResponse = 2
+ // MessageFinish indicates the client considers a particular message ID to be finished
+ MessageFinish = 3
+ // MessageTimeout indicates the client-specified timeout for a particular message ID has been reached
+ MessageTimeout = 4
+)
+
+// PacketResponse contains the packet or error encountered reading a response
+type PacketResponse struct {
+ // Packet is the packet read from the server
+ Packet *ber.Packet
+ // Error is an error encountered while reading
+ Error error
+}
+
+// ReadPacket returns the packet or an error
+func (pr *PacketResponse) ReadPacket() (*ber.Packet, error) {
+ if (pr == nil) || (pr.Packet == nil && pr.Error == nil) {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve response"))
+ }
+ return pr.Packet, pr.Error
+}
+
+type messageContext struct {
+ id int64
+ // close(done) should only be called from finishMessage()
+ done chan struct{}
+ // close(responses) should only be called from processMessages(), and only sent to from sendResponse()
+ responses chan *PacketResponse
+}
+
+// sendResponse should only be called within the processMessages() loop which
+// is also responsible for closing the responses channel.
+func (msgCtx *messageContext) sendResponse(packet *PacketResponse) {
+ select {
+ case msgCtx.responses <- packet:
+ // Successfully sent packet to message handler.
+ case <-msgCtx.done:
+ // The request handler is done and will not receive more
+ // packets.
+ }
+}
+
+type messagePacket struct {
+ Op int
+ MessageID int64
+ Packet *ber.Packet
+ Context *messageContext
+}
+
+type sendMessageFlags uint
+
+const (
+ startTLS sendMessageFlags = 1 << iota
+)
+
+// Conn represents an LDAP Connection
+type Conn struct {
+ conn net.Conn
+ isTLS bool
+ closing uint32
+ closeErr atomicValue
+ isStartingTLS bool
+ Debug debugging
+ chanConfirm chan struct{}
+ messageContexts map[int64]*messageContext
+ chanMessage chan *messagePacket
+ chanMessageID chan int64
+ wgClose sync.WaitGroup
+ outstandingRequests uint
+ messageMutex sync.Mutex
+ requestTimeout int64
+}
+
+var _ Client = &Conn{}
+
+// DefaultTimeout is a package-level variable that sets the timeout value
+// used for the Dial and DialTLS methods.
+//
+// WARNING: since this is a package-level variable, setting this value from
+// multiple places will probably result in undesired behaviour.
+var DefaultTimeout = 60 * time.Second
+
+// Dial connects to the given address on the given network using net.Dial
+// and then returns a new Conn for the connection.
+func Dial(network, addr string) (*Conn, error) {
+ c, err := net.DialTimeout(network, addr, DefaultTimeout)
+ if err != nil {
+ return nil, NewError(ErrorNetwork, err)
+ }
+ conn := NewConn(c, false)
+ conn.Start()
+ return conn, nil
+}
+
+// DialTLS connects to the given address on the given network using tls.Dial
+// and then returns a new Conn for the connection.
+func DialTLS(network, addr string, config *tls.Config) (*Conn, error) {
+ dc, err := net.DialTimeout(network, addr, DefaultTimeout)
+ if err != nil {
+ return nil, NewError(ErrorNetwork, err)
+ }
+ c := tls.Client(dc, config)
+ err = c.Handshake()
+ if err != nil {
+ // Handshake error, close the established connection before we return an error
+ dc.Close()
+ return nil, NewError(ErrorNetwork, err)
+ }
+ conn := NewConn(c, true)
+ conn.Start()
+ return conn, nil
+}
+
+// NewConn returns a new Conn using conn for network I/O.
+func NewConn(conn net.Conn, isTLS bool) *Conn {
+ return &Conn{
+ conn: conn,
+ chanConfirm: make(chan struct{}),
+ chanMessageID: make(chan int64),
+ chanMessage: make(chan *messagePacket, 10),
+ messageContexts: map[int64]*messageContext{},
+ requestTimeout: 0,
+ isTLS: isTLS,
+ }
+}
+
+// Start initializes goroutines to read responses and process messages
+func (l *Conn) Start() {
+ go l.reader()
+ go l.processMessages()
+ l.wgClose.Add(1)
+}
+
+// isClosing returns whether or not we're currently closing.
+func (l *Conn) isClosing() bool {
+ return atomic.LoadUint32(&l.closing) == 1
+}
+
+// setClosing sets the closing value to true
+func (l *Conn) setClosing() bool {
+ return atomic.CompareAndSwapUint32(&l.closing, 0, 1)
+}
+
+// Close closes the connection.
+func (l *Conn) Close() {
+ l.messageMutex.Lock()
+ defer l.messageMutex.Unlock()
+
+ if l.setClosing() {
+ l.Debug.Printf("Sending quit message and waiting for confirmation")
+ l.chanMessage <- &messagePacket{Op: MessageQuit}
+ <-l.chanConfirm
+ close(l.chanMessage)
+
+ l.Debug.Printf("Closing network connection")
+ if err := l.conn.Close(); err != nil {
+ log.Println(err)
+ }
+
+ l.wgClose.Done()
+ }
+ l.wgClose.Wait()
+}
+
+// SetTimeout sets how long after a request is sent a MessageTimeout will trigger
+func (l *Conn) SetTimeout(timeout time.Duration) {
+ if timeout > 0 {
+ atomic.StoreInt64(&l.requestTimeout, int64(timeout))
+ }
+}
+
+// nextMessageID returns the next available messageID
+func (l *Conn) nextMessageID() int64 {
+ if messageID, ok := <-l.chanMessageID; ok {
+ return messageID
+ }
+ return 0
+}
+
+// StartTLS sends the command to start a TLS session and then creates a new TLS Client
+func (l *Conn) StartTLS(config *tls.Config) error {
+ if l.isTLS {
+ return NewError(ErrorNetwork, errors.New("ldap: already encrypted"))
+ }
+
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Start TLS")
+ request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, "1.3.6.1.4.1.1466.20037", "TLS Extended Command"))
+ packet.AppendChild(request)
+ l.Debug.PrintPacket(packet)
+
+ msgCtx, err := l.sendMessageWithFlags(packet, startTLS)
+ if err != nil {
+ return err
+ }
+ defer l.finishMessage(msgCtx)
+
+ l.Debug.Printf("%d: waiting for response", msgCtx.id)
+
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return err
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ l.Close()
+ return err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ if resultCode, message := getLDAPResultCode(packet); resultCode == LDAPResultSuccess {
+ conn := tls.Client(l.conn, config)
+
+ if err := conn.Handshake(); err != nil {
+ l.Close()
+ return NewError(ErrorNetwork, fmt.Errorf("TLS handshake failed (%v)", err))
+ }
+
+ l.isTLS = true
+ l.conn = conn
+ } else {
+ return NewError(resultCode, fmt.Errorf("ldap: cannot StartTLS (%s)", message))
+ }
+ go l.reader()
+
+ return nil
+}
+
+func (l *Conn) sendMessage(packet *ber.Packet) (*messageContext, error) {
+ return l.sendMessageWithFlags(packet, 0)
+}
+
+func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags) (*messageContext, error) {
+ if l.isClosing() {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed"))
+ }
+ l.messageMutex.Lock()
+ l.Debug.Printf("flags&startTLS = %d", flags&startTLS)
+ if l.isStartingTLS {
+ l.messageMutex.Unlock()
+ return nil, NewError(ErrorNetwork, errors.New("ldap: connection is in starttls phase"))
+ }
+ if flags&startTLS != 0 {
+ if l.outstandingRequests != 0 {
+ l.messageMutex.Unlock()
+ return nil, NewError(ErrorNetwork, errors.New("ldap: cannot StartTLS with outstanding requests"))
+ }
+ l.isStartingTLS = true
+ }
+ l.outstandingRequests++
+
+ l.messageMutex.Unlock()
+
+ responses := make(chan *PacketResponse)
+ messageID := packet.Children[0].Value.(int64)
+ message := &messagePacket{
+ Op: MessageRequest,
+ MessageID: messageID,
+ Packet: packet,
+ Context: &messageContext{
+ id: messageID,
+ done: make(chan struct{}),
+ responses: responses,
+ },
+ }
+ l.sendProcessMessage(message)
+ return message.Context, nil
+}
+
+func (l *Conn) finishMessage(msgCtx *messageContext) {
+ close(msgCtx.done)
+
+ if l.isClosing() {
+ return
+ }
+
+ l.messageMutex.Lock()
+ l.outstandingRequests--
+ if l.isStartingTLS {
+ l.isStartingTLS = false
+ }
+ l.messageMutex.Unlock()
+
+ message := &messagePacket{
+ Op: MessageFinish,
+ MessageID: msgCtx.id,
+ }
+ l.sendProcessMessage(message)
+}
+
+func (l *Conn) sendProcessMessage(message *messagePacket) bool {
+ l.messageMutex.Lock()
+ defer l.messageMutex.Unlock()
+ if l.isClosing() {
+ return false
+ }
+ l.chanMessage <- message
+ return true
+}
+
+func (l *Conn) processMessages() {
+ defer func() {
+ if err := recover(); err != nil {
+ log.Printf("ldap: recovered panic in processMessages: %v", err)
+ }
+ for messageID, msgCtx := range l.messageContexts {
+ // If we are closing due to an error, inform anyone who
+ // is waiting about the error.
+ if l.isClosing() && l.closeErr.Load() != nil {
+ msgCtx.sendResponse(&PacketResponse{Error: l.closeErr.Load().(error)})
+ }
+ l.Debug.Printf("Closing channel for MessageID %d", messageID)
+ close(msgCtx.responses)
+ delete(l.messageContexts, messageID)
+ }
+ close(l.chanMessageID)
+ close(l.chanConfirm)
+ }()
+
+ var messageID int64 = 1
+ for {
+ select {
+ case l.chanMessageID <- messageID:
+ messageID++
+ case message := <-l.chanMessage:
+ switch message.Op {
+ case MessageQuit:
+ l.Debug.Printf("Shutting down - quit message received")
+ return
+ case MessageRequest:
+ // Add to message list and write to network
+ l.Debug.Printf("Sending message %d", message.MessageID)
+
+ buf := message.Packet.Bytes()
+ _, err := l.conn.Write(buf)
+ if err != nil {
+ l.Debug.Printf("Error Sending Message: %s", err.Error())
+ message.Context.sendResponse(&PacketResponse{Error: fmt.Errorf("unable to send request: %s", err)})
+ close(message.Context.responses)
+ break
+ }
+
+ // Only add to messageContexts if we were able to
+ // successfully write the message.
+ l.messageContexts[message.MessageID] = message.Context
+
+ // Add timeout if defined
+ requestTimeout := time.Duration(atomic.LoadInt64(&l.requestTimeout))
+ if requestTimeout > 0 {
+ go func() {
+ defer func() {
+ if err := recover(); err != nil {
+ log.Printf("ldap: recovered panic in RequestTimeout: %v", err)
+ }
+ }()
+ time.Sleep(requestTimeout)
+ timeoutMessage := &messagePacket{
+ Op: MessageTimeout,
+ MessageID: message.MessageID,
+ }
+ l.sendProcessMessage(timeoutMessage)
+ }()
+ }
+ case MessageResponse:
+ l.Debug.Printf("Receiving message %d", message.MessageID)
+ if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
+ msgCtx.sendResponse(&PacketResponse{message.Packet, nil})
+ } else {
+ log.Printf("Received unexpected message %d, %v", message.MessageID, l.isClosing())
+ ber.PrintPacket(message.Packet)
+ }
+ case MessageTimeout:
+ // Handle the timeout by closing the channel
+ // All reads will return immediately
+ if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
+ l.Debug.Printf("Receiving message timeout for %d", message.MessageID)
+ msgCtx.sendResponse(&PacketResponse{message.Packet, errors.New("ldap: connection timed out")})
+ delete(l.messageContexts, message.MessageID)
+ close(msgCtx.responses)
+ }
+ case MessageFinish:
+ l.Debug.Printf("Finished message %d", message.MessageID)
+ if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
+ delete(l.messageContexts, message.MessageID)
+ close(msgCtx.responses)
+ }
+ }
+ }
+ }
+}
+
+func (l *Conn) reader() {
+ cleanstop := false
+ defer func() {
+ if err := recover(); err != nil {
+ log.Printf("ldap: recovered panic in reader: %v", err)
+ }
+ if !cleanstop {
+ l.Close()
+ }
+ }()
+
+ for {
+ if cleanstop {
+ l.Debug.Printf("reader clean stopping (without closing the connection)")
+ return
+ }
+ packet, err := ber.ReadPacket(l.conn)
+ if err != nil {
+ // A read error is expected here if we are closing the connection...
+ if !l.isClosing() {
+ l.closeErr.Store(fmt.Errorf("unable to read LDAP response packet: %s", err))
+ l.Debug.Printf("reader error: %s", err.Error())
+ }
+ return
+ }
+ addLDAPDescriptions(packet)
+ if len(packet.Children) == 0 {
+ l.Debug.Printf("Received bad ldap packet")
+ continue
+ }
+ l.messageMutex.Lock()
+ if l.isStartingTLS {
+ cleanstop = true
+ }
+ l.messageMutex.Unlock()
+ message := &messagePacket{
+ Op: MessageResponse,
+ MessageID: packet.Children[0].Value.(int64),
+ Packet: packet,
+ }
+ if !l.sendProcessMessage(message) {
+ return
+ }
+ }
+}
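
A minimal sketch of the connection lifecycle exposed above (NewConn, Start, SetTimeout, StartTLS, Close). The host name, port, and TLS settings are placeholders; callers normally reach this code through the package's higher-level Dial helpers rather than constructing the Conn by hand.

    package main

    import (
        "crypto/tls"
        "log"
        "net"
        "time"

        "github.com/go-ldap/ldap" // import path as vendored here
    )

    func main() {
        // Dial a raw TCP connection ourselves, then hand it to the LDAP layer.
        raw, err := net.DialTimeout("tcp", "ldap.example.com:389", 10*time.Second)
        if err != nil {
            log.Fatal(err)
        }
        l := ldap.NewConn(raw, false) // plaintext connection, not yet TLS
        l.Start()                     // spawn the reader and processMessages goroutines
        l.SetTimeout(30 * time.Second)
        defer l.Close()

        // Upgrade to TLS via the StartTLS extended operation defined above.
        if err := l.StartTLS(&tls.Config{ServerName: "ldap.example.com"}); err != nil {
            log.Fatal(err)
        }
    }
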
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/control.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/control.go
new file mode 100644
index 00000000..342f325c
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/control.go
@@ -0,0 +1,420 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ldap
+
+import (
+ "fmt"
+ "strconv"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+const (
+ // ControlTypePaging - https://www.ietf.org/rfc/rfc2696.txt
+ ControlTypePaging = "1.2.840.113556.1.4.319"
+ // ControlTypeBeheraPasswordPolicy - https://tools.ietf.org/html/draft-behera-ldap-password-policy-10
+ ControlTypeBeheraPasswordPolicy = "1.3.6.1.4.1.42.2.27.8.5.1"
+ // ControlTypeVChuPasswordMustChange - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
+ ControlTypeVChuPasswordMustChange = "2.16.840.1.113730.3.4.4"
+ // ControlTypeVChuPasswordWarning - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
+ ControlTypeVChuPasswordWarning = "2.16.840.1.113730.3.4.5"
+ // ControlTypeManageDsaIT - https://tools.ietf.org/html/rfc3296
+ ControlTypeManageDsaIT = "2.16.840.1.113730.3.4.2"
+)
+
+// ControlTypeMap maps controls to text descriptions
+var ControlTypeMap = map[string]string{
+ ControlTypePaging: "Paging",
+ ControlTypeBeheraPasswordPolicy: "Password Policy - Behera Draft",
+ ControlTypeManageDsaIT: "Manage DSA IT",
+}
+
+// Control defines an interface controls provide to encode and describe themselves
+type Control interface {
+ // GetControlType returns the OID
+ GetControlType() string
+ // Encode returns the ber packet representation
+ Encode() *ber.Packet
+ // String returns a human-readable description
+ String() string
+}
+
+// ControlString implements the Control interface for simple controls
+type ControlString struct {
+ ControlType string
+ Criticality bool
+ ControlValue string
+}
+
+// GetControlType returns the OID
+func (c *ControlString) GetControlType() string {
+ return c.ControlType
+}
+
+// Encode returns the ber packet representation
+func (c *ControlString) Encode() *ber.Packet {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, c.ControlType, "Control Type ("+ControlTypeMap[c.ControlType]+")"))
+ if c.Criticality {
+ packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality"))
+ }
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, string(c.ControlValue), "Control Value"))
+ return packet
+}
+
+// String returns a human-readable description
+func (c *ControlString) String() string {
+ return fmt.Sprintf("Control Type: %s (%q) Criticality: %t Control Value: %s", ControlTypeMap[c.ControlType], c.ControlType, c.Criticality, c.ControlValue)
+}
+
+// ControlPaging implements the paging control described in https://www.ietf.org/rfc/rfc2696.txt
+type ControlPaging struct {
+ // PagingSize indicates the page size
+ PagingSize uint32
+ // Cookie is an opaque value returned by the server to track a paging cursor
+ Cookie []byte
+}
+
+// GetControlType returns the OID
+func (c *ControlPaging) GetControlType() string {
+ return ControlTypePaging
+}
+
+// Encode returns the ber packet representation
+func (c *ControlPaging) Encode() *ber.Packet {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypePaging, "Control Type ("+ControlTypeMap[ControlTypePaging]+")"))
+
+ p2 := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Control Value (Paging)")
+ seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Search Control Value")
+ seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(c.PagingSize), "Paging Size"))
+ cookie := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Cookie")
+ cookie.Value = c.Cookie
+ cookie.Data.Write(c.Cookie)
+ seq.AppendChild(cookie)
+ p2.AppendChild(seq)
+
+ packet.AppendChild(p2)
+ return packet
+}
+
+// String returns a human-readable description
+func (c *ControlPaging) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality: %t PagingSize: %d Cookie: %q",
+ ControlTypeMap[ControlTypePaging],
+ ControlTypePaging,
+ false,
+ c.PagingSize,
+ c.Cookie)
+}
+
+// SetCookie stores the given cookie in the paging control
+func (c *ControlPaging) SetCookie(cookie []byte) {
+ c.Cookie = cookie
+}
+
+// ControlBeheraPasswordPolicy implements the control described in https://tools.ietf.org/html/draft-behera-ldap-password-policy-10
+type ControlBeheraPasswordPolicy struct {
+ // Expire contains the number of seconds before a password will expire
+ Expire int64
+ // Grace indicates the remaining number of times a user will be allowed to authenticate with an expired password
+ Grace int64
+ // Error indicates the error code
+ Error int8
+ // ErrorString is a human readable error
+ ErrorString string
+}
+
+// GetControlType returns the OID
+func (c *ControlBeheraPasswordPolicy) GetControlType() string {
+ return ControlTypeBeheraPasswordPolicy
+}
+
+// Encode returns the ber packet representation
+func (c *ControlBeheraPasswordPolicy) Encode() *ber.Packet {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeBeheraPasswordPolicy, "Control Type ("+ControlTypeMap[ControlTypeBeheraPasswordPolicy]+")"))
+
+ return packet
+}
+
+// String returns a human-readable description
+func (c *ControlBeheraPasswordPolicy) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality: %t Expire: %d Grace: %d Error: %d, ErrorString: %s",
+ ControlTypeMap[ControlTypeBeheraPasswordPolicy],
+ ControlTypeBeheraPasswordPolicy,
+ false,
+ c.Expire,
+ c.Grace,
+ c.Error,
+ c.ErrorString)
+}
+
+// ControlVChuPasswordMustChange implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
+type ControlVChuPasswordMustChange struct {
+ // MustChange indicates if the password is required to be changed
+ MustChange bool
+}
+
+// GetControlType returns the OID
+func (c *ControlVChuPasswordMustChange) GetControlType() string {
+ return ControlTypeVChuPasswordMustChange
+}
+
+// Encode returns the ber packet representation
+func (c *ControlVChuPasswordMustChange) Encode() *ber.Packet {
+ return nil
+}
+
+// String returns a human-readable description
+func (c *ControlVChuPasswordMustChange) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality: %t MustChange: %v",
+ ControlTypeMap[ControlTypeVChuPasswordMustChange],
+ ControlTypeVChuPasswordMustChange,
+ false,
+ c.MustChange)
+}
+
+// ControlVChuPasswordWarning implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
+type ControlVChuPasswordWarning struct {
+ // Expire indicates the time in seconds until the password expires
+ Expire int64
+}
+
+// GetControlType returns the OID
+func (c *ControlVChuPasswordWarning) GetControlType() string {
+ return ControlTypeVChuPasswordWarning
+}
+
+// Encode returns the ber packet representation
+func (c *ControlVChuPasswordWarning) Encode() *ber.Packet {
+ return nil
+}
+
+// String returns a human-readable description
+func (c *ControlVChuPasswordWarning) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality: %t Expire: %b",
+ ControlTypeMap[ControlTypeVChuPasswordWarning],
+ ControlTypeVChuPasswordWarning,
+ false,
+ c.Expire)
+}
+
+// ControlManageDsaIT implements the control described in https://tools.ietf.org/html/rfc3296
+type ControlManageDsaIT struct {
+ // Criticality indicates if this control is required
+ Criticality bool
+}
+
+// GetControlType returns the OID
+func (c *ControlManageDsaIT) GetControlType() string {
+ return ControlTypeManageDsaIT
+}
+
+// Encode returns the ber packet representation
+func (c *ControlManageDsaIT) Encode() *ber.Packet {
+ //FIXME
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeManageDsaIT, "Control Type ("+ControlTypeMap[ControlTypeManageDsaIT]+")"))
+ if c.Criticality {
+ packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality"))
+ }
+ return packet
+}
+
+// String returns a human-readable description
+func (c *ControlManageDsaIT) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality: %t",
+ ControlTypeMap[ControlTypeManageDsaIT],
+ ControlTypeManageDsaIT,
+ c.Criticality)
+}
+
+// NewControlManageDsaIT returns a ControlManageDsaIT control
+func NewControlManageDsaIT(Criticality bool) *ControlManageDsaIT {
+ return &ControlManageDsaIT{Criticality: Criticality}
+}
+
+// FindControl returns the first control of the given type in the list, or nil
+func FindControl(controls []Control, controlType string) Control {
+ for _, c := range controls {
+ if c.GetControlType() == controlType {
+ return c
+ }
+ }
+ return nil
+}
+
+// DecodeControl returns a control read from the given packet, or nil if no recognized control can be made
+func DecodeControl(packet *ber.Packet) Control {
+ var (
+ ControlType = ""
+ Criticality = false
+ value *ber.Packet
+ )
+
+ switch len(packet.Children) {
+ case 0:
+ // at least one child is required for control type
+ return nil
+
+ case 1:
+ // just type, no criticality or value
+ ControlType = packet.Children[0].Value.(string)
+ packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
+
+ case 2:
+ ControlType = packet.Children[0].Value.(string)
+ packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
+
+ // Children[1] could be criticality or value (both are optional)
+ // duck-type on whether this is a boolean
+ if _, ok := packet.Children[1].Value.(bool); ok {
+ packet.Children[1].Description = "Criticality"
+ Criticality = packet.Children[1].Value.(bool)
+ } else {
+ packet.Children[1].Description = "Control Value"
+ value = packet.Children[1]
+ }
+
+ case 3:
+ ControlType = packet.Children[0].Value.(string)
+ packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
+
+ packet.Children[1].Description = "Criticality"
+ Criticality = packet.Children[1].Value.(bool)
+
+ packet.Children[2].Description = "Control Value"
+ value = packet.Children[2]
+
+ default:
+ // more than 3 children is invalid
+ return nil
+ }
+
+ switch ControlType {
+ case ControlTypeManageDsaIT:
+ return NewControlManageDsaIT(Criticality)
+ case ControlTypePaging:
+ value.Description += " (Paging)"
+ c := new(ControlPaging)
+ if value.Value != nil {
+ valueChildren := ber.DecodePacket(value.Data.Bytes())
+ value.Data.Truncate(0)
+ value.Value = nil
+ value.AppendChild(valueChildren)
+ }
+ value = value.Children[0]
+ value.Description = "Search Control Value"
+ value.Children[0].Description = "Paging Size"
+ value.Children[1].Description = "Cookie"
+ c.PagingSize = uint32(value.Children[0].Value.(int64))
+ c.Cookie = value.Children[1].Data.Bytes()
+ value.Children[1].Value = c.Cookie
+ return c
+ case ControlTypeBeheraPasswordPolicy:
+ value.Description += " (Password Policy - Behera)"
+ c := NewControlBeheraPasswordPolicy()
+ if value.Value != nil {
+ valueChildren := ber.DecodePacket(value.Data.Bytes())
+ value.Data.Truncate(0)
+ value.Value = nil
+ value.AppendChild(valueChildren)
+ }
+
+ sequence := value.Children[0]
+
+ for _, child := range sequence.Children {
+ if child.Tag == 0 {
+ //Warning
+ warningPacket := child.Children[0]
+ packet := ber.DecodePacket(warningPacket.Data.Bytes())
+ val, ok := packet.Value.(int64)
+ if ok {
+ if warningPacket.Tag == 0 {
+ //timeBeforeExpiration
+ c.Expire = val
+ warningPacket.Value = c.Expire
+ } else if warningPacket.Tag == 1 {
+ //graceAuthNsRemaining
+ c.Grace = val
+ warningPacket.Value = c.Grace
+ }
+ }
+ } else if child.Tag == 1 {
+ // Error
+ packet := ber.DecodePacket(child.Data.Bytes())
+ val, ok := packet.Value.(int8)
+ if !ok {
+ // value was not an int8 as expected; fall back to -1 (unknown error)
+ val = -1
+ }
+ c.Error = val
+ child.Value = c.Error
+ c.ErrorString = BeheraPasswordPolicyErrorMap[c.Error]
+ }
+ }
+ return c
+ case ControlTypeVChuPasswordMustChange:
+ c := &ControlVChuPasswordMustChange{MustChange: true}
+ return c
+ case ControlTypeVChuPasswordWarning:
+ c := &ControlVChuPasswordWarning{Expire: -1}
+ expireStr := ber.DecodeString(value.Data.Bytes())
+
+ expire, err := strconv.ParseInt(expireStr, 10, 64)
+ if err != nil {
+ return nil
+ }
+ c.Expire = expire
+ value.Value = c.Expire
+
+ return c
+ default:
+ c := new(ControlString)
+ c.ControlType = ControlType
+ c.Criticality = Criticality
+ if value != nil {
+ c.ControlValue = value.Value.(string)
+ }
+ return c
+ }
+}
+
+// NewControlString returns a generic control
+func NewControlString(controlType string, criticality bool, controlValue string) *ControlString {
+ return &ControlString{
+ ControlType: controlType,
+ Criticality: criticality,
+ ControlValue: controlValue,
+ }
+}
+
+// NewControlPaging returns a paging control
+func NewControlPaging(pagingSize uint32) *ControlPaging {
+ return &ControlPaging{PagingSize: pagingSize}
+}
+
+// NewControlBeheraPasswordPolicy returns a ControlBeheraPasswordPolicy
+func NewControlBeheraPasswordPolicy() *ControlBeheraPasswordPolicy {
+ return &ControlBeheraPasswordPolicy{
+ Expire: -1,
+ Grace: -1,
+ Error: -1,
+ }
+}
+
+func encodeControls(controls []Control) *ber.Packet {
+ packet := ber.Encode(ber.ClassContext, ber.TypeConstructed, 0, nil, "Controls")
+ for _, control := range controls {
+ packet.AppendChild(control.Encode())
+ }
+ return packet
+}
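
A small sketch of how the control helpers above fit together: construct a paging control, encode it for a request, then recover the paging control from a response's control list and reuse its cookie. The responseControls slice is a stand-in for controls decoded from a real server response.

    package main

    import (
        "fmt"

        "github.com/go-ldap/ldap"
    )

    func main() {
        paging := ldap.NewControlPaging(512) // ask the server for pages of 512 entries
        fmt.Println(paging.String())         // human-readable description of the control
        _ = paging.Encode()                  // BER packet, attached to a request via encodeControls

        // responseControls stands in for the controls returned on a search response.
        var responseControls []ldap.Control
        if c, ok := ldap.FindControl(responseControls, ldap.ControlTypePaging).(*ldap.ControlPaging); ok {
            paging.SetCookie(c.Cookie) // carry the server's cookie into the next page request
        }
    }
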
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/debug.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/debug.go
new file mode 100644
index 00000000..7279fc25
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/debug.go
@@ -0,0 +1,24 @@
+package ldap
+
+import (
+ "log"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// debugging type
+// - has a Printf method to write the debug output
+type debugging bool
+
+// write debug output
+func (debug debugging) Printf(format string, args ...interface{}) {
+ if debug {
+ log.Printf(format, args...)
+ }
+}
+
+func (debug debugging) PrintPacket(packet *ber.Packet) {
+ if debug {
+ ber.PrintPacket(packet)
+ }
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/del.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/del.go
new file mode 100644
index 00000000..4fd63dc3
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/del.go
@@ -0,0 +1,84 @@
+//
+// https://tools.ietf.org/html/rfc4511
+//
+// DelRequest ::= [APPLICATION 10] LDAPDN
+
+package ldap
+
+import (
+ "errors"
+ "log"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// DelRequest implements an LDAP deletion request
+type DelRequest struct {
+ // DN is the name of the directory entry to delete
+ DN string
+ // Controls hold optional controls to send with the request
+ Controls []Control
+}
+
+func (d DelRequest) encode() *ber.Packet {
+ request := ber.Encode(ber.ClassApplication, ber.TypePrimitive, ApplicationDelRequest, d.DN, "Del Request")
+ request.Data.Write([]byte(d.DN))
+ return request
+}
+
+// NewDelRequest creates a delete request for the given DN and controls
+func NewDelRequest(DN string,
+ Controls []Control) *DelRequest {
+ return &DelRequest{
+ DN: DN,
+ Controls: Controls,
+ }
+}
+
+// Del executes the given delete request
+func (l *Conn) Del(delRequest *DelRequest) error {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+ packet.AppendChild(delRequest.encode())
+ if delRequest.Controls != nil {
+ packet.AppendChild(encodeControls(delRequest.Controls))
+ }
+
+ l.Debug.PrintPacket(packet)
+
+ msgCtx, err := l.sendMessage(packet)
+ if err != nil {
+ return err
+ }
+ defer l.finishMessage(msgCtx)
+
+ l.Debug.Printf("%d: waiting for response", msgCtx.id)
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return err
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ return err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ if packet.Children[1].Tag == ApplicationDelResponse {
+ resultCode, resultDescription := getLDAPResultCode(packet)
+ if resultCode != 0 {
+ return NewError(resultCode, errors.New(resultDescription))
+ }
+ } else {
+ log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
+ }
+
+ l.Debug.Printf("%d: returning", msgCtx.id)
+ return nil
+}
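
A brief sketch of issuing the delete request defined above. This is a fragment rather than a full program: l is assumed to be an established *ldap.Conn (see the connection sketch after conn.go), the DN is illustrative, and IsErrorWithCode/LDAPResultNoSuchObject come from error.go below.

    func deleteEntry(l *ldap.Conn) error {
        del := ldap.NewDelRequest("uid=jdoe,ou=people,dc=example,dc=com", nil)
        if err := l.Del(del); err != nil {
            // Treat an already-missing entry as success; surface anything else.
            if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) {
                return nil
            }
            return err
        }
        return nil
    }
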
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/dn.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/dn.go
new file mode 100644
index 00000000..34e9023a
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/dn.go
@@ -0,0 +1,247 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// File contains DN parsing functionality
+//
+// https://tools.ietf.org/html/rfc4514
+//
+// distinguishedName = [ relativeDistinguishedName
+// *( COMMA relativeDistinguishedName ) ]
+// relativeDistinguishedName = attributeTypeAndValue
+// *( PLUS attributeTypeAndValue )
+// attributeTypeAndValue = attributeType EQUALS attributeValue
+// attributeType = descr / numericoid
+// attributeValue = string / hexstring
+//
+// ; The following characters are to be escaped when they appear
+// ; in the value to be encoded: ESC, one of <escaped>, leading
+// ; SHARP or SPACE, trailing SPACE, and NULL.
+// string = [ ( leadchar / pair ) [ *( stringchar / pair )
+// ( trailchar / pair ) ] ]
+//
+// leadchar = LUTF1 / UTFMB
+// LUTF1 = %x01-1F / %x21 / %x24-2A / %x2D-3A /
+// %x3D / %x3F-5B / %x5D-7F
+//
+// trailchar = TUTF1 / UTFMB
+// TUTF1 = %x01-1F / %x21 / %x23-2A / %x2D-3A /
+// %x3D / %x3F-5B / %x5D-7F
+//
+// stringchar = SUTF1 / UTFMB
+// SUTF1 = %x01-21 / %x23-2A / %x2D-3A /
+// %x3D / %x3F-5B / %x5D-7F
+//
+// pair = ESC ( ESC / special / hexpair )
+// special = escaped / SPACE / SHARP / EQUALS
+// escaped = DQUOTE / PLUS / COMMA / SEMI / LANGLE / RANGLE
+// hexstring = SHARP 1*hexpair
+// hexpair = HEX HEX
+//
+// where the productions <descr>, <numericoid>, <COMMA>, <DQUOTE>,
+// <EQUALS>, <ESC>, <HEX>, <LANGLE>, <NULL>, <PLUS>, <RANGLE>, <SEMI>,
+// <SPACE>, <SHARP>, and <UTFMB> are defined in [RFC4512].
+//
+
+package ldap
+
+import (
+ "bytes"
+ enchex "encoding/hex"
+ "errors"
+ "fmt"
+ "strings"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// AttributeTypeAndValue represents an attributeTypeAndValue from https://tools.ietf.org/html/rfc4514
+type AttributeTypeAndValue struct {
+ // Type is the attribute type
+ Type string
+ // Value is the attribute value
+ Value string
+}
+
+// RelativeDN represents a relativeDistinguishedName from https://tools.ietf.org/html/rfc4514
+type RelativeDN struct {
+ Attributes []*AttributeTypeAndValue
+}
+
+// DN represents a distinguishedName from https://tools.ietf.org/html/rfc4514
+type DN struct {
+ RDNs []*RelativeDN
+}
+
+// ParseDN returns a distinguishedName or an error
+func ParseDN(str string) (*DN, error) {
+ dn := new(DN)
+ dn.RDNs = make([]*RelativeDN, 0)
+ rdn := new(RelativeDN)
+ rdn.Attributes = make([]*AttributeTypeAndValue, 0)
+ buffer := bytes.Buffer{}
+ attribute := new(AttributeTypeAndValue)
+ escaping := false
+
+ unescapedTrailingSpaces := 0
+ stringFromBuffer := func() string {
+ s := buffer.String()
+ s = s[0 : len(s)-unescapedTrailingSpaces]
+ buffer.Reset()
+ unescapedTrailingSpaces = 0
+ return s
+ }
+
+ for i := 0; i < len(str); i++ {
+ char := str[i]
+ if escaping {
+ unescapedTrailingSpaces = 0
+ escaping = false
+ switch char {
+ case ' ', '"', '#', '+', ',', ';', '<', '=', '>', '\\':
+ buffer.WriteByte(char)
+ continue
+ }
+ // Not a special character, assume hex encoded octet
+ if len(str) == i+1 {
+ return nil, errors.New("Got corrupted escaped character")
+ }
+
+ dst := []byte{0}
+ n, err := enchex.Decode([]byte(dst), []byte(str[i:i+2]))
+ if err != nil {
+ return nil, fmt.Errorf("Failed to decode escaped character: %s", err)
+ } else if n != 1 {
+ return nil, fmt.Errorf("Expected 1 byte when un-escaping, got %d", n)
+ }
+ buffer.WriteByte(dst[0])
+ i++
+ } else if char == '\\' {
+ unescapedTrailingSpaces = 0
+ escaping = true
+ } else if char == '=' {
+ attribute.Type = stringFromBuffer()
+ // Special case: If the first character in the value is # the
+ // following data is BER encoded so we can just fast forward
+ // and decode.
+ if len(str) > i+1 && str[i+1] == '#' {
+ i += 2
+ index := strings.IndexAny(str[i:], ",+")
+ data := str
+ if index > 0 {
+ data = str[i : i+index]
+ } else {
+ data = str[i:]
+ }
+ rawBER, err := enchex.DecodeString(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to decode BER encoding: %s", err)
+ }
+ packet := ber.DecodePacket(rawBER)
+ buffer.WriteString(packet.Data.String())
+ i += len(data) - 1
+ }
+ } else if char == ',' || char == '+' {
+ // We're done with this RDN or value, push it
+ if len(attribute.Type) == 0 {
+ return nil, errors.New("incomplete type, value pair")
+ }
+ attribute.Value = stringFromBuffer()
+ rdn.Attributes = append(rdn.Attributes, attribute)
+ attribute = new(AttributeTypeAndValue)
+ if char == ',' {
+ dn.RDNs = append(dn.RDNs, rdn)
+ rdn = new(RelativeDN)
+ rdn.Attributes = make([]*AttributeTypeAndValue, 0)
+ }
+ } else if char == ' ' && buffer.Len() == 0 {
+ // ignore unescaped leading spaces
+ continue
+ } else {
+ if char == ' ' {
+ // Track unescaped spaces in case they are trailing and we need to remove them
+ unescapedTrailingSpaces++
+ } else {
+ // Reset if we see a non-space char
+ unescapedTrailingSpaces = 0
+ }
+ buffer.WriteByte(char)
+ }
+ }
+ if buffer.Len() > 0 {
+ if len(attribute.Type) == 0 {
+ return nil, errors.New("DN ended with incomplete type, value pair")
+ }
+ attribute.Value = stringFromBuffer()
+ rdn.Attributes = append(rdn.Attributes, attribute)
+ dn.RDNs = append(dn.RDNs, rdn)
+ }
+ return dn, nil
+}
+
+// Equal returns true if the DNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
+// Returns true if they have the same number of relative distinguished names
+// and corresponding relative distinguished names (by position) are the same.
+func (d *DN) Equal(other *DN) bool {
+ if len(d.RDNs) != len(other.RDNs) {
+ return false
+ }
+ for i := range d.RDNs {
+ if !d.RDNs[i].Equal(other.RDNs[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// AncestorOf returns true if the other DN consists of at least one RDN followed by all the RDNs of the current DN.
+// "ou=widgets,o=acme.com" is an ancestor of "ou=sprockets,ou=widgets,o=acme.com"
+// "ou=widgets,o=acme.com" is not an ancestor of "ou=sprockets,ou=widgets,o=foo.com"
+// "ou=widgets,o=acme.com" is not an ancestor of "ou=widgets,o=acme.com"
+func (d *DN) AncestorOf(other *DN) bool {
+ if len(d.RDNs) >= len(other.RDNs) {
+ return false
+ }
+ // Take the last `len(d.RDNs)` RDNs from the other DN to compare against
+ otherRDNs := other.RDNs[len(other.RDNs)-len(d.RDNs):]
+ for i := range d.RDNs {
+ if !d.RDNs[i].Equal(otherRDNs[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Equal returns true if the RelativeDNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
+// Relative distinguished names are the same if and only if they have the same number of AttributeTypeAndValues
+// and each attribute of the first RDN is the same as the attribute of the second RDN with the same attribute type.
+// The order of attributes is not significant.
+// Case of attribute types is not significant.
+func (r *RelativeDN) Equal(other *RelativeDN) bool {
+ if len(r.Attributes) != len(other.Attributes) {
+ return false
+ }
+ return r.hasAllAttributes(other.Attributes) && other.hasAllAttributes(r.Attributes)
+}
+
+func (r *RelativeDN) hasAllAttributes(attrs []*AttributeTypeAndValue) bool {
+ for _, attr := range attrs {
+ found := false
+ for _, myattr := range r.Attributes {
+ if myattr.Equal(attr) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+ return true
+}
+
+// Equal returns true if the AttributeTypeAndValue is equivalent to the specified AttributeTypeAndValue
+// Case of the attribute type is not significant
+func (a *AttributeTypeAndValue) Equal(other *AttributeTypeAndValue) bool {
+ return strings.EqualFold(a.Type, other.Type) && a.Value == other.Value
+}
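
A short, self-contained sketch of the DN helpers above. Attribute types compare case-insensitively while attribute values compare exactly; the DNs are illustrative.

    package main

    import (
        "fmt"
        "log"

        "github.com/go-ldap/ldap"
    )

    func main() {
        parent, err := ldap.ParseDN("ou=widgets,o=acme.com")
        if err != nil {
            log.Fatal(err)
        }
        child, err := ldap.ParseDN("OU=Sprockets,ou=widgets,o=acme.com")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(parent.Equal(child))      // false: different number of RDNs
        fmt.Println(parent.AncestorOf(child)) // true: child ends with parent's RDNs
    }
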
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/doc.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/doc.go
new file mode 100644
index 00000000..f20d39bc
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/doc.go
@@ -0,0 +1,4 @@
+/*
+Package ldap provides basic LDAP v3 functionality.
+*/
+package ldap
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/error.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/error.go
new file mode 100644
index 00000000..4cccb537
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/error.go
@@ -0,0 +1,155 @@
+package ldap
+
+import (
+ "fmt"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// LDAP Result Codes
+const (
+ LDAPResultSuccess = 0
+ LDAPResultOperationsError = 1
+ LDAPResultProtocolError = 2
+ LDAPResultTimeLimitExceeded = 3
+ LDAPResultSizeLimitExceeded = 4
+ LDAPResultCompareFalse = 5
+ LDAPResultCompareTrue = 6
+ LDAPResultAuthMethodNotSupported = 7
+ LDAPResultStrongAuthRequired = 8
+ LDAPResultReferral = 10
+ LDAPResultAdminLimitExceeded = 11
+ LDAPResultUnavailableCriticalExtension = 12
+ LDAPResultConfidentialityRequired = 13
+ LDAPResultSaslBindInProgress = 14
+ LDAPResultNoSuchAttribute = 16
+ LDAPResultUndefinedAttributeType = 17
+ LDAPResultInappropriateMatching = 18
+ LDAPResultConstraintViolation = 19
+ LDAPResultAttributeOrValueExists = 20
+ LDAPResultInvalidAttributeSyntax = 21
+ LDAPResultNoSuchObject = 32
+ LDAPResultAliasProblem = 33
+ LDAPResultInvalidDNSyntax = 34
+ LDAPResultAliasDereferencingProblem = 36
+ LDAPResultInappropriateAuthentication = 48
+ LDAPResultInvalidCredentials = 49
+ LDAPResultInsufficientAccessRights = 50
+ LDAPResultBusy = 51
+ LDAPResultUnavailable = 52
+ LDAPResultUnwillingToPerform = 53
+ LDAPResultLoopDetect = 54
+ LDAPResultNamingViolation = 64
+ LDAPResultObjectClassViolation = 65
+ LDAPResultNotAllowedOnNonLeaf = 66
+ LDAPResultNotAllowedOnRDN = 67
+ LDAPResultEntryAlreadyExists = 68
+ LDAPResultObjectClassModsProhibited = 69
+ LDAPResultAffectsMultipleDSAs = 71
+ LDAPResultOther = 80
+
+ ErrorNetwork = 200
+ ErrorFilterCompile = 201
+ ErrorFilterDecompile = 202
+ ErrorDebugging = 203
+ ErrorUnexpectedMessage = 204
+ ErrorUnexpectedResponse = 205
+)
+
+// LDAPResultCodeMap contains string descriptions for LDAP error codes
+var LDAPResultCodeMap = map[uint8]string{
+ LDAPResultSuccess: "Success",
+ LDAPResultOperationsError: "Operations Error",
+ LDAPResultProtocolError: "Protocol Error",
+ LDAPResultTimeLimitExceeded: "Time Limit Exceeded",
+ LDAPResultSizeLimitExceeded: "Size Limit Exceeded",
+ LDAPResultCompareFalse: "Compare False",
+ LDAPResultCompareTrue: "Compare True",
+ LDAPResultAuthMethodNotSupported: "Auth Method Not Supported",
+ LDAPResultStrongAuthRequired: "Strong Auth Required",
+ LDAPResultReferral: "Referral",
+ LDAPResultAdminLimitExceeded: "Admin Limit Exceeded",
+ LDAPResultUnavailableCriticalExtension: "Unavailable Critical Extension",
+ LDAPResultConfidentialityRequired: "Confidentiality Required",
+ LDAPResultSaslBindInProgress: "Sasl Bind In Progress",
+ LDAPResultNoSuchAttribute: "No Such Attribute",
+ LDAPResultUndefinedAttributeType: "Undefined Attribute Type",
+ LDAPResultInappropriateMatching: "Inappropriate Matching",
+ LDAPResultConstraintViolation: "Constraint Violation",
+ LDAPResultAttributeOrValueExists: "Attribute Or Value Exists",
+ LDAPResultInvalidAttributeSyntax: "Invalid Attribute Syntax",
+ LDAPResultNoSuchObject: "No Such Object",
+ LDAPResultAliasProblem: "Alias Problem",
+ LDAPResultInvalidDNSyntax: "Invalid DN Syntax",
+ LDAPResultAliasDereferencingProblem: "Alias Dereferencing Problem",
+ LDAPResultInappropriateAuthentication: "Inappropriate Authentication",
+ LDAPResultInvalidCredentials: "Invalid Credentials",
+ LDAPResultInsufficientAccessRights: "Insufficient Access Rights",
+ LDAPResultBusy: "Busy",
+ LDAPResultUnavailable: "Unavailable",
+ LDAPResultUnwillingToPerform: "Unwilling To Perform",
+ LDAPResultLoopDetect: "Loop Detect",
+ LDAPResultNamingViolation: "Naming Violation",
+ LDAPResultObjectClassViolation: "Object Class Violation",
+ LDAPResultNotAllowedOnNonLeaf: "Not Allowed On Non Leaf",
+ LDAPResultNotAllowedOnRDN: "Not Allowed On RDN",
+ LDAPResultEntryAlreadyExists: "Entry Already Exists",
+ LDAPResultObjectClassModsProhibited: "Object Class Mods Prohibited",
+ LDAPResultAffectsMultipleDSAs: "Affects Multiple DSAs",
+ LDAPResultOther: "Other",
+
+ ErrorNetwork: "Network Error",
+ ErrorFilterCompile: "Filter Compile Error",
+ ErrorFilterDecompile: "Filter Decompile Error",
+ ErrorDebugging: "Debugging Error",
+ ErrorUnexpectedMessage: "Unexpected Message",
+ ErrorUnexpectedResponse: "Unexpected Response",
+}
+
+func getLDAPResultCode(packet *ber.Packet) (code uint8, description string) {
+ if packet == nil {
+ return ErrorUnexpectedResponse, "Empty packet"
+ } else if len(packet.Children) >= 2 {
+ response := packet.Children[1]
+ if response == nil {
+ return ErrorUnexpectedResponse, "Empty response in packet"
+ }
+ if response.ClassType == ber.ClassApplication && response.TagType == ber.TypeConstructed && len(response.Children) >= 3 {
+ // Children[1].Children[2] is the diagnosticMessage which is guaranteed to exist as seen here: https://tools.ietf.org/html/rfc4511#section-4.1.9
+ return uint8(response.Children[0].Value.(int64)), response.Children[2].Value.(string)
+ }
+ }
+
+ return ErrorNetwork, "Invalid packet format"
+}
+
+// Error holds LDAP error information
+type Error struct {
+ // Err is the underlying error
+ Err error
+ // ResultCode is the LDAP error code
+ ResultCode uint8
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("LDAP Result Code %d %q: %s", e.ResultCode, LDAPResultCodeMap[e.ResultCode], e.Err.Error())
+}
+
+// NewError creates an LDAP error with the given code and underlying error
+func NewError(resultCode uint8, err error) error {
+ return &Error{ResultCode: resultCode, Err: err}
+}
+
+// IsErrorWithCode returns true if the given error is an LDAP error with the given result code
+func IsErrorWithCode(err error, desiredResultCode uint8) bool {
+ if err == nil {
+ return false
+ }
+
+ serverError, ok := err.(*Error)
+ if !ok {
+ return false
+ }
+
+ return serverError.ResultCode == desiredResultCode
+}
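
A tiny sketch of how the error helpers above are consumed; the wrapped message is arbitrary.

    package main

    import (
        "errors"
        "fmt"

        "github.com/go-ldap/ldap"
    )

    func main() {
        err := ldap.NewError(ldap.LDAPResultInvalidCredentials, errors.New("bind failed"))
        fmt.Println(err) // LDAP Result Code 49 "Invalid Credentials": bind failed
        fmt.Println(ldap.IsErrorWithCode(err, ldap.LDAPResultInvalidCredentials)) // true
        fmt.Println(ldap.IsErrorWithCode(err, ldap.LDAPResultBusy))               // false
    }
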
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/filter.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/filter.go
new file mode 100644
index 00000000..3858a286
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/filter.go
@@ -0,0 +1,469 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ldap
+
+import (
+ "bytes"
+ hexpac "encoding/hex"
+ "errors"
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// Filter choices
+const (
+ FilterAnd = 0
+ FilterOr = 1
+ FilterNot = 2
+ FilterEqualityMatch = 3
+ FilterSubstrings = 4
+ FilterGreaterOrEqual = 5
+ FilterLessOrEqual = 6
+ FilterPresent = 7
+ FilterApproxMatch = 8
+ FilterExtensibleMatch = 9
+)
+
+// FilterMap contains human readable descriptions of Filter choices
+var FilterMap = map[uint64]string{
+ FilterAnd: "And",
+ FilterOr: "Or",
+ FilterNot: "Not",
+ FilterEqualityMatch: "Equality Match",
+ FilterSubstrings: "Substrings",
+ FilterGreaterOrEqual: "Greater Or Equal",
+ FilterLessOrEqual: "Less Or Equal",
+ FilterPresent: "Present",
+ FilterApproxMatch: "Approx Match",
+ FilterExtensibleMatch: "Extensible Match",
+}
+
+// SubstringFilter options
+const (
+ FilterSubstringsInitial = 0
+ FilterSubstringsAny = 1
+ FilterSubstringsFinal = 2
+)
+
+// FilterSubstringsMap contains human readable descriptions of SubstringFilter choices
+var FilterSubstringsMap = map[uint64]string{
+ FilterSubstringsInitial: "Substrings Initial",
+ FilterSubstringsAny: "Substrings Any",
+ FilterSubstringsFinal: "Substrings Final",
+}
+
+// MatchingRuleAssertion choices
+const (
+ MatchingRuleAssertionMatchingRule = 1
+ MatchingRuleAssertionType = 2
+ MatchingRuleAssertionMatchValue = 3
+ MatchingRuleAssertionDNAttributes = 4
+)
+
+// MatchingRuleAssertionMap contains human readable descriptions of MatchingRuleAssertion choices
+var MatchingRuleAssertionMap = map[uint64]string{
+ MatchingRuleAssertionMatchingRule: "Matching Rule Assertion Matching Rule",
+ MatchingRuleAssertionType: "Matching Rule Assertion Type",
+ MatchingRuleAssertionMatchValue: "Matching Rule Assertion Match Value",
+ MatchingRuleAssertionDNAttributes: "Matching Rule Assertion DN Attributes",
+}
+
+// CompileFilter converts a string representation of a filter into a BER-encoded packet
+func CompileFilter(filter string) (*ber.Packet, error) {
+ if len(filter) == 0 || filter[0] != '(' {
+ return nil, NewError(ErrorFilterCompile, errors.New("ldap: filter does not start with an '('"))
+ }
+ packet, pos, err := compileFilter(filter, 1)
+ if err != nil {
+ return nil, err
+ }
+ switch {
+ case pos > len(filter):
+ return nil, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
+ case pos < len(filter):
+ return nil, NewError(ErrorFilterCompile, errors.New("ldap: finished compiling filter with extra at end: "+fmt.Sprint(filter[pos:])))
+ }
+ return packet, nil
+}
+
+// DecompileFilter converts a packet representation of a filter into a string representation
+func DecompileFilter(packet *ber.Packet) (ret string, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = NewError(ErrorFilterDecompile, errors.New("ldap: error decompiling filter"))
+ }
+ }()
+ ret = "("
+ err = nil
+ childStr := ""
+
+ switch packet.Tag {
+ case FilterAnd:
+ ret += "&"
+ for _, child := range packet.Children {
+ childStr, err = DecompileFilter(child)
+ if err != nil {
+ return
+ }
+ ret += childStr
+ }
+ case FilterOr:
+ ret += "|"
+ for _, child := range packet.Children {
+ childStr, err = DecompileFilter(child)
+ if err != nil {
+ return
+ }
+ ret += childStr
+ }
+ case FilterNot:
+ ret += "!"
+ childStr, err = DecompileFilter(packet.Children[0])
+ if err != nil {
+ return
+ }
+ ret += childStr
+
+ case FilterSubstrings:
+ ret += ber.DecodeString(packet.Children[0].Data.Bytes())
+ ret += "="
+ for i, child := range packet.Children[1].Children {
+ if i == 0 && child.Tag != FilterSubstringsInitial {
+ ret += "*"
+ }
+ ret += EscapeFilter(ber.DecodeString(child.Data.Bytes()))
+ if child.Tag != FilterSubstringsFinal {
+ ret += "*"
+ }
+ }
+ case FilterEqualityMatch:
+ ret += ber.DecodeString(packet.Children[0].Data.Bytes())
+ ret += "="
+ ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
+ case FilterGreaterOrEqual:
+ ret += ber.DecodeString(packet.Children[0].Data.Bytes())
+ ret += ">="
+ ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
+ case FilterLessOrEqual:
+ ret += ber.DecodeString(packet.Children[0].Data.Bytes())
+ ret += "<="
+ ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
+ case FilterPresent:
+ ret += ber.DecodeString(packet.Data.Bytes())
+ ret += "=*"
+ case FilterApproxMatch:
+ ret += ber.DecodeString(packet.Children[0].Data.Bytes())
+ ret += "~="
+ ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
+ case FilterExtensibleMatch:
+ attr := ""
+ dnAttributes := false
+ matchingRule := ""
+ value := ""
+
+ for _, child := range packet.Children {
+ switch child.Tag {
+ case MatchingRuleAssertionMatchingRule:
+ matchingRule = ber.DecodeString(child.Data.Bytes())
+ case MatchingRuleAssertionType:
+ attr = ber.DecodeString(child.Data.Bytes())
+ case MatchingRuleAssertionMatchValue:
+ value = ber.DecodeString(child.Data.Bytes())
+ case MatchingRuleAssertionDNAttributes:
+ dnAttributes = child.Value.(bool)
+ }
+ }
+
+ if len(attr) > 0 {
+ ret += attr
+ }
+ if dnAttributes {
+ ret += ":dn"
+ }
+ if len(matchingRule) > 0 {
+ ret += ":"
+ ret += matchingRule
+ }
+ ret += ":="
+ ret += EscapeFilter(value)
+ }
+
+ ret += ")"
+ return
+}
+
+func compileFilterSet(filter string, pos int, parent *ber.Packet) (int, error) {
+ for pos < len(filter) && filter[pos] == '(' {
+ child, newPos, err := compileFilter(filter, pos+1)
+ if err != nil {
+ return pos, err
+ }
+ pos = newPos
+ parent.AppendChild(child)
+ }
+ if pos == len(filter) {
+ return pos, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
+ }
+
+ return pos + 1, nil
+}
+
+func compileFilter(filter string, pos int) (*ber.Packet, int, error) {
+ var (
+ packet *ber.Packet
+ err error
+ )
+
+ defer func() {
+ if r := recover(); r != nil {
+ err = NewError(ErrorFilterCompile, errors.New("ldap: error compiling filter"))
+ }
+ }()
+ newPos := pos
+
+ currentRune, currentWidth := utf8.DecodeRuneInString(filter[newPos:])
+
+ switch currentRune {
+ case utf8.RuneError:
+ return nil, 0, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos))
+ case '(':
+ packet, newPos, err = compileFilter(filter, pos+currentWidth)
+ newPos++
+ return packet, newPos, err
+ case '&':
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterAnd, nil, FilterMap[FilterAnd])
+ newPos, err = compileFilterSet(filter, pos+currentWidth, packet)
+ return packet, newPos, err
+ case '|':
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterOr, nil, FilterMap[FilterOr])
+ newPos, err = compileFilterSet(filter, pos+currentWidth, packet)
+ return packet, newPos, err
+ case '!':
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterNot, nil, FilterMap[FilterNot])
+ var child *ber.Packet
+ child, newPos, err = compileFilter(filter, pos+currentWidth)
+ packet.AppendChild(child)
+ return packet, newPos, err
+ default:
+ const (
+ stateReadingAttr = 0
+ stateReadingExtensibleMatchingRule = 1
+ stateReadingCondition = 2
+ )
+
+ state := stateReadingAttr
+
+ attribute := ""
+ extensibleDNAttributes := false
+ extensibleMatchingRule := ""
+ condition := ""
+
+ for newPos < len(filter) {
+ remainingFilter := filter[newPos:]
+ currentRune, currentWidth = utf8.DecodeRuneInString(remainingFilter)
+ if currentRune == ')' {
+ break
+ }
+ if currentRune == utf8.RuneError {
+ return packet, newPos, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos))
+ }
+
+ switch state {
+ case stateReadingAttr:
+ switch {
+ // Extensible rule, with only DN-matching
+ case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:="):
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
+ extensibleDNAttributes = true
+ state = stateReadingCondition
+ newPos += 5
+
+ // Extensible rule, with DN-matching and a matching OID
+ case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:"):
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
+ extensibleDNAttributes = true
+ state = stateReadingExtensibleMatchingRule
+ newPos += 4
+
+ // Extensible rule, with attr only
+ case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
+ state = stateReadingCondition
+ newPos += 2
+
+ // Extensible rule, with no DN attribute matching
+ case currentRune == ':':
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
+ state = stateReadingExtensibleMatchingRule
+ newPos++
+
+ // Equality condition
+ case currentRune == '=':
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterEqualityMatch, nil, FilterMap[FilterEqualityMatch])
+ state = stateReadingCondition
+ newPos++
+
+ // Greater-than or equal
+ case currentRune == '>' && strings.HasPrefix(remainingFilter, ">="):
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterGreaterOrEqual, nil, FilterMap[FilterGreaterOrEqual])
+ state = stateReadingCondition
+ newPos += 2
+
+ // Less-than or equal
+ case currentRune == '<' && strings.HasPrefix(remainingFilter, "<="):
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterLessOrEqual, nil, FilterMap[FilterLessOrEqual])
+ state = stateReadingCondition
+ newPos += 2
+
+ // Approx
+ case currentRune == '~' && strings.HasPrefix(remainingFilter, "~="):
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterApproxMatch, nil, FilterMap[FilterApproxMatch])
+ state = stateReadingCondition
+ newPos += 2
+
+ // Still reading the attribute name
+ default:
+ attribute += fmt.Sprintf("%c", currentRune)
+ newPos += currentWidth
+ }
+
+ case stateReadingExtensibleMatchingRule:
+ switch {
+
+ // Matching rule OID is done
+ case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
+ state = stateReadingCondition
+ newPos += 2
+
+ // Still reading the matching rule oid
+ default:
+ extensibleMatchingRule += fmt.Sprintf("%c", currentRune)
+ newPos += currentWidth
+ }
+
+ case stateReadingCondition:
+ // append to the condition
+ condition += fmt.Sprintf("%c", currentRune)
+ newPos += currentWidth
+ }
+ }
+
+ if newPos == len(filter) {
+ err = NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
+ return packet, newPos, err
+ }
+ if packet == nil {
+ err = NewError(ErrorFilterCompile, errors.New("ldap: error parsing filter"))
+ return packet, newPos, err
+ }
+
+ switch {
+ case packet.Tag == FilterExtensibleMatch:
+ // MatchingRuleAssertion ::= SEQUENCE {
+ // matchingRule [1] MatchingRuleID OPTIONAL,
+ // type [2] AttributeDescription OPTIONAL,
+ // matchValue [3] AssertionValue,
+ // dnAttributes [4] BOOLEAN DEFAULT FALSE
+ // }
+
+ // Include the matching rule oid, if specified
+ if len(extensibleMatchingRule) > 0 {
+ packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchingRule, extensibleMatchingRule, MatchingRuleAssertionMap[MatchingRuleAssertionMatchingRule]))
+ }
+
+ // Include the attribute, if specified
+ if len(attribute) > 0 {
+ packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionType, attribute, MatchingRuleAssertionMap[MatchingRuleAssertionType]))
+ }
+
+ // Add the value (only required child)
+ encodedString, encodeErr := escapedStringToEncodedBytes(condition)
+ if encodeErr != nil {
+ return packet, newPos, encodeErr
+ }
+ packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchValue, encodedString, MatchingRuleAssertionMap[MatchingRuleAssertionMatchValue]))
+
+ // Defaults to false, so only include in the sequence if true
+ if extensibleDNAttributes {
+ packet.AppendChild(ber.NewBoolean(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionDNAttributes, extensibleDNAttributes, MatchingRuleAssertionMap[MatchingRuleAssertionDNAttributes]))
+ }
+
+ case packet.Tag == FilterEqualityMatch && condition == "*":
+ packet = ber.NewString(ber.ClassContext, ber.TypePrimitive, FilterPresent, attribute, FilterMap[FilterPresent])
+ case packet.Tag == FilterEqualityMatch && strings.Contains(condition, "*"):
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
+ packet.Tag = FilterSubstrings
+ packet.Description = FilterMap[uint64(packet.Tag)]
+ seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Substrings")
+ parts := strings.Split(condition, "*")
+ for i, part := range parts {
+ if part == "" {
+ continue
+ }
+ var tag ber.Tag
+ switch i {
+ case 0:
+ tag = FilterSubstringsInitial
+ case len(parts) - 1:
+ tag = FilterSubstringsFinal
+ default:
+ tag = FilterSubstringsAny
+ }
+ encodedString, encodeErr := escapedStringToEncodedBytes(part)
+ if encodeErr != nil {
+ return packet, newPos, encodeErr
+ }
+ seq.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, tag, encodedString, FilterSubstringsMap[uint64(tag)]))
+ }
+ packet.AppendChild(seq)
+ default:
+ encodedString, encodeErr := escapedStringToEncodedBytes(condition)
+ if encodeErr != nil {
+ return packet, newPos, encodeErr
+ }
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, encodedString, "Condition"))
+ }
+
+ newPos += currentWidth
+ return packet, newPos, err
+ }
+}
+
+// Convert from "ABC\xx\xx\xx" form to literal bytes for transport
+func escapedStringToEncodedBytes(escapedString string) (string, error) {
+ var buffer bytes.Buffer
+ i := 0
+ for i < len(escapedString) {
+ currentRune, currentWidth := utf8.DecodeRuneInString(escapedString[i:])
+ if currentRune == utf8.RuneError {
+ return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", i))
+ }
+
+ // Check for escaped hex characters and convert them to their literal value for transport.
+ if currentRune == '\\' {
+ // http://tools.ietf.org/search/rfc4515
+ // \ (%x5C) is not a valid character unless it is followed by two HEX characters due to not
+ // being a member of UTF1SUBSET.
+ if i+2 > len(escapedString) {
+ return "", NewError(ErrorFilterCompile, errors.New("ldap: missing characters for escape in filter"))
+ }
+ escByte, decodeErr := hexpac.DecodeString(escapedString[i+1 : i+3])
+ if decodeErr != nil {
+ return "", NewError(ErrorFilterCompile, errors.New("ldap: invalid characters for escape in filter"))
+ }
+ buffer.WriteByte(escByte[0])
+ i += 2 // +1 from end of loop, so 3 total for \xx.
+ } else {
+ buffer.WriteRune(currentRune)
+ }
+
+ i += currentWidth
+ }
+ return buffer.String(), nil
+}
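
A minimal round-trip sketch for the filter compiler above; the filter string is illustrative.

    package main

    import (
        "fmt"
        "log"

        "github.com/go-ldap/ldap"
    )

    func main() {
        packet, err := ldap.CompileFilter("(&(objectClass=person)(uid=jdoe*))")
        if err != nil {
            log.Fatal(err)
        }
        filter, err := ldap.DecompileFilter(packet)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(filter) // prints the equivalent filter string back
    }
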
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/ldap.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/ldap.go
new file mode 100644
index 00000000..49692475
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/ldap.go
@@ -0,0 +1,320 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ldap
+
+import (
+ "errors"
+ "io/ioutil"
+ "os"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// LDAP Application Codes
+const (
+ ApplicationBindRequest = 0
+ ApplicationBindResponse = 1
+ ApplicationUnbindRequest = 2
+ ApplicationSearchRequest = 3
+ ApplicationSearchResultEntry = 4
+ ApplicationSearchResultDone = 5
+ ApplicationModifyRequest = 6
+ ApplicationModifyResponse = 7
+ ApplicationAddRequest = 8
+ ApplicationAddResponse = 9
+ ApplicationDelRequest = 10
+ ApplicationDelResponse = 11
+ ApplicationModifyDNRequest = 12
+ ApplicationModifyDNResponse = 13
+ ApplicationCompareRequest = 14
+ ApplicationCompareResponse = 15
+ ApplicationAbandonRequest = 16
+ ApplicationSearchResultReference = 19
+ ApplicationExtendedRequest = 23
+ ApplicationExtendedResponse = 24
+)
+
+// ApplicationMap contains human readable descriptions of LDAP Application Codes
+var ApplicationMap = map[uint8]string{
+ ApplicationBindRequest: "Bind Request",
+ ApplicationBindResponse: "Bind Response",
+ ApplicationUnbindRequest: "Unbind Request",
+ ApplicationSearchRequest: "Search Request",
+ ApplicationSearchResultEntry: "Search Result Entry",
+ ApplicationSearchResultDone: "Search Result Done",
+ ApplicationModifyRequest: "Modify Request",
+ ApplicationModifyResponse: "Modify Response",
+ ApplicationAddRequest: "Add Request",
+ ApplicationAddResponse: "Add Response",
+ ApplicationDelRequest: "Del Request",
+ ApplicationDelResponse: "Del Response",
+ ApplicationModifyDNRequest: "Modify DN Request",
+ ApplicationModifyDNResponse: "Modify DN Response",
+ ApplicationCompareRequest: "Compare Request",
+ ApplicationCompareResponse: "Compare Response",
+ ApplicationAbandonRequest: "Abandon Request",
+ ApplicationSearchResultReference: "Search Result Reference",
+ ApplicationExtendedRequest: "Extended Request",
+ ApplicationExtendedResponse: "Extended Response",
+}
+
+// Ldap Behera Password Policy Draft 10 (https://tools.ietf.org/html/draft-behera-ldap-password-policy-10)
+const (
+ BeheraPasswordExpired = 0
+ BeheraAccountLocked = 1
+ BeheraChangeAfterReset = 2
+ BeheraPasswordModNotAllowed = 3
+ BeheraMustSupplyOldPassword = 4
+ BeheraInsufficientPasswordQuality = 5
+ BeheraPasswordTooShort = 6
+ BeheraPasswordTooYoung = 7
+ BeheraPasswordInHistory = 8
+)
+
+// BeheraPasswordPolicyErrorMap contains human readable descriptions of Behera Password Policy error codes
+var BeheraPasswordPolicyErrorMap = map[int8]string{
+ BeheraPasswordExpired: "Password expired",
+ BeheraAccountLocked: "Account locked",
+ BeheraChangeAfterReset: "Password must be changed",
+ BeheraPasswordModNotAllowed: "Policy prevents password modification",
+ BeheraMustSupplyOldPassword: "Policy requires old password in order to change password",
+ BeheraInsufficientPasswordQuality: "Password fails quality checks",
+ BeheraPasswordTooShort: "Password is too short for policy",
+ BeheraPasswordTooYoung: "Password has been changed too recently",
+ BeheraPasswordInHistory: "New password is in list of old passwords",
+}
+
+// addLDAPDescriptions adds descriptions to an LDAP Response packet for debugging
+func addLDAPDescriptions(packet *ber.Packet) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = NewError(ErrorDebugging, errors.New("ldap: cannot process packet to add descriptions"))
+ }
+ }()
+ packet.Description = "LDAP Response"
+ packet.Children[0].Description = "Message ID"
+
+ application := uint8(packet.Children[1].Tag)
+ packet.Children[1].Description = ApplicationMap[application]
+
+ switch application {
+ case ApplicationBindRequest:
+ addRequestDescriptions(packet)
+ case ApplicationBindResponse:
+ addDefaultLDAPResponseDescriptions(packet)
+ case ApplicationUnbindRequest:
+ addRequestDescriptions(packet)
+ case ApplicationSearchRequest:
+ addRequestDescriptions(packet)
+ case ApplicationSearchResultEntry:
+ packet.Children[1].Children[0].Description = "Object Name"
+ packet.Children[1].Children[1].Description = "Attributes"
+ for _, child := range packet.Children[1].Children[1].Children {
+ child.Description = "Attribute"
+ child.Children[0].Description = "Attribute Name"
+ child.Children[1].Description = "Attribute Values"
+ for _, grandchild := range child.Children[1].Children {
+ grandchild.Description = "Attribute Value"
+ }
+ }
+ if len(packet.Children) == 3 {
+ addControlDescriptions(packet.Children[2])
+ }
+ case ApplicationSearchResultDone:
+ addDefaultLDAPResponseDescriptions(packet)
+ case ApplicationModifyRequest:
+ addRequestDescriptions(packet)
+ case ApplicationModifyResponse:
+ case ApplicationAddRequest:
+ addRequestDescriptions(packet)
+ case ApplicationAddResponse:
+ case ApplicationDelRequest:
+ addRequestDescriptions(packet)
+ case ApplicationDelResponse:
+ case ApplicationModifyDNRequest:
+ addRequestDescriptions(packet)
+ case ApplicationModifyDNResponse:
+ case ApplicationCompareRequest:
+ addRequestDescriptions(packet)
+ case ApplicationCompareResponse:
+ case ApplicationAbandonRequest:
+ addRequestDescriptions(packet)
+ case ApplicationSearchResultReference:
+ case ApplicationExtendedRequest:
+ addRequestDescriptions(packet)
+ case ApplicationExtendedResponse:
+ }
+
+ return nil
+}
+
+func addControlDescriptions(packet *ber.Packet) {
+ packet.Description = "Controls"
+ for _, child := range packet.Children {
+ var value *ber.Packet
+ controlType := ""
+ child.Description = "Control"
+ switch len(child.Children) {
+ case 0:
+ // at least one child is required for control type
+ continue
+
+ case 1:
+ // just type, no criticality or value
+ controlType = child.Children[0].Value.(string)
+ child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
+
+ case 2:
+ controlType = child.Children[0].Value.(string)
+ child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
+ // Children[1] could be criticality or value (both are optional)
+ // duck-type on whether this is a boolean
+ if _, ok := child.Children[1].Value.(bool); ok {
+ child.Children[1].Description = "Criticality"
+ } else {
+ child.Children[1].Description = "Control Value"
+ value = child.Children[1]
+ }
+
+ case 3:
+ // criticality and value present
+ controlType = child.Children[0].Value.(string)
+ child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
+ child.Children[1].Description = "Criticality"
+ child.Children[2].Description = "Control Value"
+ value = child.Children[2]
+
+ default:
+ // more than 3 children is invalid
+ continue
+ }
+ if value == nil {
+ continue
+ }
+ switch controlType {
+ case ControlTypePaging:
+ value.Description += " (Paging)"
+ if value.Value != nil {
+ valueChildren := ber.DecodePacket(value.Data.Bytes())
+ value.Data.Truncate(0)
+ value.Value = nil
+ valueChildren.Children[1].Value = valueChildren.Children[1].Data.Bytes()
+ value.AppendChild(valueChildren)
+ }
+ value.Children[0].Description = "Real Search Control Value"
+ value.Children[0].Children[0].Description = "Paging Size"
+ value.Children[0].Children[1].Description = "Cookie"
+
+ case ControlTypeBeheraPasswordPolicy:
+ value.Description += " (Password Policy - Behera Draft)"
+ if value.Value != nil {
+ valueChildren := ber.DecodePacket(value.Data.Bytes())
+ value.Data.Truncate(0)
+ value.Value = nil
+ value.AppendChild(valueChildren)
+ }
+ sequence := value.Children[0]
+ for _, child := range sequence.Children {
+ if child.Tag == 0 {
+ //Warning
+ warningPacket := child.Children[0]
+ packet := ber.DecodePacket(warningPacket.Data.Bytes())
+ val, ok := packet.Value.(int64)
+ if ok {
+ if warningPacket.Tag == 0 {
+ //timeBeforeExpiration
+ value.Description += " (TimeBeforeExpiration)"
+ warningPacket.Value = val
+ } else if warningPacket.Tag == 1 {
+ //graceAuthNsRemaining
+ value.Description += " (GraceAuthNsRemaining)"
+ warningPacket.Value = val
+ }
+ }
+ } else if child.Tag == 1 {
+ // Error
+ packet := ber.DecodePacket(child.Data.Bytes())
+ val, ok := packet.Value.(int8)
+ if !ok {
+ val = -1
+ }
+ child.Description = "Error"
+ child.Value = val
+ }
+ }
+ }
+ }
+}
+
+func addRequestDescriptions(packet *ber.Packet) {
+ packet.Description = "LDAP Request"
+ packet.Children[0].Description = "Message ID"
+ packet.Children[1].Description = ApplicationMap[uint8(packet.Children[1].Tag)]
+ if len(packet.Children) == 3 {
+ addControlDescriptions(packet.Children[2])
+ }
+}
+
+func addDefaultLDAPResponseDescriptions(packet *ber.Packet) {
+ resultCode, _ := getLDAPResultCode(packet)
+ packet.Children[1].Children[0].Description = "Result Code (" + LDAPResultCodeMap[resultCode] + ")"
+ packet.Children[1].Children[1].Description = "Matched DN"
+ packet.Children[1].Children[2].Description = "Error Message"
+ if len(packet.Children[1].Children) > 3 {
+ packet.Children[1].Children[3].Description = "Referral"
+ }
+ if len(packet.Children) == 3 {
+ addControlDescriptions(packet.Children[2])
+ }
+}
+
+// DebugBinaryFile reads and prints packets from the given filename
+func DebugBinaryFile(fileName string) error {
+ file, err := ioutil.ReadFile(fileName)
+ if err != nil {
+ return NewError(ErrorDebugging, err)
+ }
+ ber.PrintBytes(os.Stdout, file, "")
+ packet := ber.DecodePacket(file)
+ addLDAPDescriptions(packet)
+ ber.PrintPacket(packet)
+
+ return nil
+}
+
+var hex = "0123456789abcdef"
+
+func mustEscape(c byte) bool {
+ return c > 0x7f || c == '(' || c == ')' || c == '\\' || c == '*' || c == 0
+}
+
+// EscapeFilter escapes the special characters in the set `()*\` and those
+// outside the range 0 < c < 0x80 from the provided LDAP filter string,
+// as defined in RFC4515.
+func EscapeFilter(filter string) string {
+ escape := 0
+ for i := 0; i < len(filter); i++ {
+ if mustEscape(filter[i]) {
+ escape++
+ }
+ }
+ if escape == 0 {
+ return filter
+ }
+ buf := make([]byte, len(filter)+escape*2)
+ for i, j := 0, 0; i < len(filter); i++ {
+ c := filter[i]
+ if mustEscape(c) {
+ buf[j+0] = '\\'
+ buf[j+1] = hex[c>>4]
+ buf[j+2] = hex[c&0xf]
+ j += 3
+ } else {
+ buf[j] = c
+ j++
+ }
+ }
+ return string(buf)
+}
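
The EscapeFilter helper defined above is the part of this file most callers use directly: any untrusted value that ends up inside a filter string should pass through it so that `(`, `)`, `*`, `\` and bytes outside 0 < c < 0x80 are hex-escaped per RFC 4515. A minimal sketch of that usage follows; the github.com/go-ldap/ldap import path and the (uid=%s) filter template are illustrative assumptions, not part of this diff.

    package main

    import (
        "fmt"

        "github.com/go-ldap/ldap" // assumed import path for this vendored copy
    )

    func main() {
        // Untrusted input that would otherwise widen or break the filter.
        userInput := `admin*)(objectClass=*`

        // EscapeFilter hex-escapes ( ) * \ and bytes outside 0 < c < 0x80.
        filter := fmt.Sprintf("(uid=%s)", ldap.EscapeFilter(userInput))
        fmt.Println(filter) // prints: (uid=admin\2a\29\28objectClass=\2a)
    }
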
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/modify.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/modify.go
new file mode 100644
index 00000000..e4ab6cef
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/modify.go
@@ -0,0 +1,170 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// File contains Modify functionality
+//
+// https://tools.ietf.org/html/rfc4511
+//
+// ModifyRequest ::= [APPLICATION 6] SEQUENCE {
+// object LDAPDN,
+// changes SEQUENCE OF change SEQUENCE {
+// operation ENUMERATED {
+// add (0),
+// delete (1),
+// replace (2),
+// ... },
+// modification PartialAttribute } }
+//
+// PartialAttribute ::= SEQUENCE {
+// type AttributeDescription,
+// vals SET OF value AttributeValue }
+//
+// AttributeDescription ::= LDAPString
+// -- Constrained to <attributedescription>
+// -- [RFC4512]
+//
+// AttributeValue ::= OCTET STRING
+//
+
+package ldap
+
+import (
+ "errors"
+ "log"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// Change operation choices
+const (
+ AddAttribute = 0
+ DeleteAttribute = 1
+ ReplaceAttribute = 2
+)
+
+// PartialAttribute for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
+type PartialAttribute struct {
+ // Type is the type of the partial attribute
+ Type string
+ // Vals are the values of the partial attribute
+ Vals []string
+}
+
+func (p *PartialAttribute) encode() *ber.Packet {
+ seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "PartialAttribute")
+ seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, p.Type, "Type"))
+ set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue")
+ for _, value := range p.Vals {
+ set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals"))
+ }
+ seq.AppendChild(set)
+ return seq
+}
+
+// ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
+type ModifyRequest struct {
+ // DN is the distinguishedName of the directory entry to modify
+ DN string
+ // AddAttributes contain the attributes to add
+ AddAttributes []PartialAttribute
+ // DeleteAttributes contain the attributes to delete
+ DeleteAttributes []PartialAttribute
+ // ReplaceAttributes contain the attributes to replace
+ ReplaceAttributes []PartialAttribute
+}
+
+// Add appends the given attribute to the list of attributes to add
+func (m *ModifyRequest) Add(attrType string, attrVals []string) {
+ m.AddAttributes = append(m.AddAttributes, PartialAttribute{Type: attrType, Vals: attrVals})
+}
+
+// Delete appends the given attribute to the list of attributes to delete
+func (m *ModifyRequest) Delete(attrType string, attrVals []string) {
+ m.DeleteAttributes = append(m.DeleteAttributes, PartialAttribute{Type: attrType, Vals: attrVals})
+}
+
+// Replace appends the given attribute to the list of attributes to replace
+func (m *ModifyRequest) Replace(attrType string, attrVals []string) {
+ m.ReplaceAttributes = append(m.ReplaceAttributes, PartialAttribute{Type: attrType, Vals: attrVals})
+}
+
+func (m ModifyRequest) encode() *ber.Packet {
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyRequest, nil, "Modify Request")
+ request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, m.DN, "DN"))
+ changes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Changes")
+ for _, attribute := range m.AddAttributes {
+ change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change")
+ change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(AddAttribute), "Operation"))
+ change.AppendChild(attribute.encode())
+ changes.AppendChild(change)
+ }
+ for _, attribute := range m.DeleteAttributes {
+ change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change")
+ change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(DeleteAttribute), "Operation"))
+ change.AppendChild(attribute.encode())
+ changes.AppendChild(change)
+ }
+ for _, attribute := range m.ReplaceAttributes {
+ change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change")
+ change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(ReplaceAttribute), "Operation"))
+ change.AppendChild(attribute.encode())
+ changes.AppendChild(change)
+ }
+ request.AppendChild(changes)
+ return request
+}
+
+// NewModifyRequest creates a modify request for the given DN
+func NewModifyRequest(
+ dn string,
+) *ModifyRequest {
+ return &ModifyRequest{
+ DN: dn,
+ }
+}
+
+// Modify performs the ModifyRequest
+func (l *Conn) Modify(modifyRequest *ModifyRequest) error {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+ packet.AppendChild(modifyRequest.encode())
+
+ l.Debug.PrintPacket(packet)
+
+ msgCtx, err := l.sendMessage(packet)
+ if err != nil {
+ return err
+ }
+ defer l.finishMessage(msgCtx)
+
+ l.Debug.Printf("%d: waiting for response", msgCtx.id)
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return err
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ return err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ if packet.Children[1].Tag == ApplicationModifyResponse {
+ resultCode, resultDescription := getLDAPResultCode(packet)
+ if resultCode != 0 {
+ return NewError(resultCode, errors.New(resultDescription))
+ }
+ } else {
+ log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
+ }
+
+ l.Debug.Printf("%d: returning", msgCtx.id)
+ return nil
+}
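
Taken together with the connection and bind plumbing defined elsewhere in this package (conn.go and bind.go, which are not part of this hunk), the ModifyRequest API above is used roughly as in the sketch below. The host, bind DN, credentials and target entry are placeholders chosen for illustration, and the import path is an assumption for this vendored copy.

    package main

    import (
        "log"

        "github.com/go-ldap/ldap" // assumed import path for this vendored copy
    )

    func main() {
        // Connection details are placeholders; Dial and Bind live in other files of this package.
        l, err := ldap.Dial("tcp", "ldap.example.com:389")
        if err != nil {
            log.Fatal(err)
        }
        defer l.Close()

        if err := l.Bind("cn=admin,dc=example,dc=com", "secret"); err != nil {
            log.Fatal(err)
        }

        // Replace one attribute and add another in a single ModifyRequest.
        modify := ldap.NewModifyRequest("uid=jdoe,ou=people,dc=example,dc=com")
        modify.Replace("mail", []string{"jdoe@example.com"})
        modify.Add("description", []string{"updated via go-ldap"})

        if err := l.Modify(modify); err != nil {
            log.Fatal(err)
        }
    }
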
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/passwdmodify.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/passwdmodify.go
new file mode 100644
index 00000000..7d8246fd
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/passwdmodify.go
@@ -0,0 +1,148 @@
+// This file contains the password modify extended operation as specified in RFC 3062
+//
+// https://tools.ietf.org/html/rfc3062
+//
+
+package ldap
+
+import (
+ "errors"
+ "fmt"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+const (
+ passwordModifyOID = "1.3.6.1.4.1.4203.1.11.1"
+)
+
+// PasswordModifyRequest implements the Password Modify Extended Operation as defined in https://www.ietf.org/rfc/rfc3062.txt
+type PasswordModifyRequest struct {
+ // UserIdentity is an optional string representation of the user associated with the request.
+ // This string may or may not be an LDAPDN [RFC2253].
+ // If no UserIdentity field is present, the request acts upon the password of the user currently associated with the LDAP session
+ UserIdentity string
+ // OldPassword, if present, contains the user's current password
+ OldPassword string
+ // NewPassword, if present, contains the desired password for this user
+ NewPassword string
+}
+
+// PasswordModifyResult holds the server response to a PasswordModifyRequest
+type PasswordModifyResult struct {
+ // GeneratedPassword holds a password generated by the server, if present
+ GeneratedPassword string
+}
+
+func (r *PasswordModifyRequest) encode() (*ber.Packet, error) {
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Password Modify Extended Operation")
+ request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, passwordModifyOID, "Extended Request Name: Password Modify OID"))
+ extendedRequestValue := ber.Encode(ber.ClassContext, ber.TypePrimitive, 1, nil, "Extended Request Value: Password Modify Request")
+ passwordModifyRequestValue := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Password Modify Request")
+ if r.UserIdentity != "" {
+ passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, r.UserIdentity, "User Identity"))
+ }
+ if r.OldPassword != "" {
+ passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 1, r.OldPassword, "Old Password"))
+ }
+ if r.NewPassword != "" {
+ passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 2, r.NewPassword, "New Password"))
+ }
+
+ extendedRequestValue.AppendChild(passwordModifyRequestValue)
+ request.AppendChild(extendedRequestValue)
+
+ return request, nil
+}
+
+// NewPasswordModifyRequest creates a new PasswordModifyRequest
+//
+// According to RFC 3062:
+// userIdentity is a string representing the user associated with the request.
+// This string may or may not be an LDAPDN (RFC 2253).
+// If userIdentity is empty, the operation acts on the user associated
+// with the session.
+//
+// oldPassword is the user's current password; it may be empty or required
+// depending on the session user's access rights (usually an administrator
+// can change a user's password without knowing the current one) and the
+// password policy (see the pwdSafeModify password policy attribute).
+//
+// newPassword is the user's desired password. If empty, the server can return
+// an error or generate a new password that will be available in
+// PasswordModifyResult.GeneratedPassword.
+//
+func NewPasswordModifyRequest(userIdentity string, oldPassword string, newPassword string) *PasswordModifyRequest {
+ return &PasswordModifyRequest{
+ UserIdentity: userIdentity,
+ OldPassword: oldPassword,
+ NewPassword: newPassword,
+ }
+}
+
+// PasswordModify performs the modification request
+func (l *Conn) PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+
+ encodedPasswordModifyRequest, err := passwordModifyRequest.encode()
+ if err != nil {
+ return nil, err
+ }
+ packet.AppendChild(encodedPasswordModifyRequest)
+
+ l.Debug.PrintPacket(packet)
+
+ msgCtx, err := l.sendMessage(packet)
+ if err != nil {
+ return nil, err
+ }
+ defer l.finishMessage(msgCtx)
+
+ result := &PasswordModifyResult{}
+
+ l.Debug.Printf("%d: waiting for response", msgCtx.id)
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return nil, err
+ }
+
+ if packet == nil {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve message"))
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ return nil, err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ if packet.Children[1].Tag == ApplicationExtendedResponse {
+ resultCode, resultDescription := getLDAPResultCode(packet)
+ if resultCode != 0 {
+ return nil, NewError(resultCode, errors.New(resultDescription))
+ }
+ } else {
+ return nil, NewError(ErrorUnexpectedResponse, fmt.Errorf("Unexpected Response: %d", packet.Children[1].Tag))
+ }
+
+ extendedResponse := packet.Children[1]
+ for _, child := range extendedResponse.Children {
+ if child.Tag == 11 {
+ passwordModifyResponseValue := ber.DecodePacket(child.Data.Bytes())
+ if len(passwordModifyResponseValue.Children) == 1 {
+ if passwordModifyResponseValue.Children[0].Tag == 0 {
+ result.GeneratedPassword = ber.DecodeString(passwordModifyResponseValue.Children[0].Data.Bytes())
+ }
+ }
+ }
+ }
+
+ return result, nil
+}
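
NewPasswordModifyRequest and Conn.PasswordModify from the file above are typically wrapped in a small helper; passing an empty newPassword asks the server to generate one, which then comes back in PasswordModifyResult.GeneratedPassword. Below is a sketch under the assumption of an already bound *ldap.Conn, the github.com/go-ldap/ldap import path and illustrative DNs.

    package ldapexample

    import (
        "fmt"

        "github.com/go-ldap/ldap" // assumed import path for this vendored copy
    )

    // changePassword issues the RFC 3062 password modify extended operation over
    // an already bound connection. An empty newPw asks the server to generate a
    // password, which is returned to the caller.
    func changePassword(l *ldap.Conn, userDN, oldPw, newPw string) (string, error) {
        req := ldap.NewPasswordModifyRequest(userDN, oldPw, newPw)
        res, err := l.PasswordModify(req)
        if err != nil {
            return "", fmt.Errorf("password modify for %s failed: %v", userDN, err)
        }
        return res.GeneratedPassword, nil
    }
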
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/search.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/search.go
new file mode 100644
index 00000000..2a99894c
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/search.go
@@ -0,0 +1,450 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// File contains Search functionality
+//
+// https://tools.ietf.org/html/rfc4511
+//
+// SearchRequest ::= [APPLICATION 3] SEQUENCE {
+// baseObject LDAPDN,
+// scope ENUMERATED {
+// baseObject (0),
+// singleLevel (1),
+// wholeSubtree (2),
+// ... },
+// derefAliases ENUMERATED {
+// neverDerefAliases (0),
+// derefInSearching (1),
+// derefFindingBaseObj (2),
+// derefAlways (3) },
+// sizeLimit INTEGER (0 .. maxInt),
+// timeLimit INTEGER (0 .. maxInt),
+// typesOnly BOOLEAN,
+// filter Filter,
+// attributes AttributeSelection }
+//
+// AttributeSelection ::= SEQUENCE OF selector LDAPString
+// -- The LDAPString is constrained to
+// -- <attributeSelector> in Section 4.5.1.8
+//
+// Filter ::= CHOICE {
+// and [0] SET SIZE (1..MAX) OF filter Filter,
+// or [1] SET SIZE (1..MAX) OF filter Filter,
+// not [2] Filter,
+// equalityMatch [3] AttributeValueAssertion,
+// substrings [4] SubstringFilter,
+// greaterOrEqual [5] AttributeValueAssertion,
+// lessOrEqual [6] AttributeValueAssertion,
+// present [7] AttributeDescription,
+// approxMatch [8] AttributeValueAssertion,
+// extensibleMatch [9] MatchingRuleAssertion,
+// ... }
+//
+// SubstringFilter ::= SEQUENCE {
+// type AttributeDescription,
+// substrings SEQUENCE SIZE (1..MAX) OF substring CHOICE {
+// initial [0] AssertionValue, -- can occur at most once
+// any [1] AssertionValue,
+// final [2] AssertionValue } -- can occur at most once
+// }
+//
+// MatchingRuleAssertion ::= SEQUENCE {
+// matchingRule [1] MatchingRuleId OPTIONAL,
+// type [2] AttributeDescription OPTIONAL,
+// matchValue [3] AssertionValue,
+// dnAttributes [4] BOOLEAN DEFAULT FALSE }
+//
+//
+
+package ldap
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// scope choices
+const (
+ ScopeBaseObject = 0
+ ScopeSingleLevel = 1
+ ScopeWholeSubtree = 2
+)
+
+// ScopeMap contains human readable descriptions of scope choices
+var ScopeMap = map[int]string{
+ ScopeBaseObject: "Base Object",
+ ScopeSingleLevel: "Single Level",
+ ScopeWholeSubtree: "Whole Subtree",
+}
+
+// derefAliases
+const (
+ NeverDerefAliases = 0
+ DerefInSearching = 1
+ DerefFindingBaseObj = 2
+ DerefAlways = 3
+)
+
+// DerefMap contains human readable descriptions of derefAliases choices
+var DerefMap = map[int]string{
+ NeverDerefAliases: "NeverDerefAliases",
+ DerefInSearching: "DerefInSearching",
+ DerefFindingBaseObj: "DerefFindingBaseObj",
+ DerefAlways: "DerefAlways",
+}
+
+// NewEntry returns an Entry object with the specified distinguished name and attribute key-value pairs.
+// The map of attributes is accessed in alphabetical order of its keys to ensure that, for the
+// same input map of attributes, the output entry will contain the attributes in the same order.
+func NewEntry(dn string, attributes map[string][]string) *Entry {
+ var attributeNames []string
+ for attributeName := range attributes {
+ attributeNames = append(attributeNames, attributeName)
+ }
+ sort.Strings(attributeNames)
+
+ var encodedAttributes []*EntryAttribute
+ for _, attributeName := range attributeNames {
+ encodedAttributes = append(encodedAttributes, NewEntryAttribute(attributeName, attributes[attributeName]))
+ }
+ return &Entry{
+ DN: dn,
+ Attributes: encodedAttributes,
+ }
+}
+
+// Entry represents a single search result entry
+type Entry struct {
+ // DN is the distinguished name of the entry
+ DN string
+ // Attributes are the returned attributes for the entry
+ Attributes []*EntryAttribute
+}
+
+// GetAttributeValues returns the values for the named attribute, or an empty list
+func (e *Entry) GetAttributeValues(attribute string) []string {
+ for _, attr := range e.Attributes {
+ if attr.Name == attribute {
+ return attr.Values
+ }
+ }
+ return []string{}
+}
+
+// GetRawAttributeValues returns the byte values for the named attribute, or an empty list
+func (e *Entry) GetRawAttributeValues(attribute string) [][]byte {
+ for _, attr := range e.Attributes {
+ if attr.Name == attribute {
+ return attr.ByteValues
+ }
+ }
+ return [][]byte{}
+}
+
+// GetAttributeValue returns the first value for the named attribute, or ""
+func (e *Entry) GetAttributeValue(attribute string) string {
+ values := e.GetAttributeValues(attribute)
+ if len(values) == 0 {
+ return ""
+ }
+ return values[0]
+}
+
+// GetRawAttributeValue returns the first value for the named attribute, or an empty slice
+func (e *Entry) GetRawAttributeValue(attribute string) []byte {
+ values := e.GetRawAttributeValues(attribute)
+ if len(values) == 0 {
+ return []byte{}
+ }
+ return values[0]
+}
+
+// Print outputs a human-readable description
+func (e *Entry) Print() {
+ fmt.Printf("DN: %s\n", e.DN)
+ for _, attr := range e.Attributes {
+ attr.Print()
+ }
+}
+
+// PrettyPrint outputs a human-readable description with indenting
+func (e *Entry) PrettyPrint(indent int) {
+ fmt.Printf("%sDN: %s\n", strings.Repeat(" ", indent), e.DN)
+ for _, attr := range e.Attributes {
+ attr.PrettyPrint(indent + 2)
+ }
+}
+
+// NewEntryAttribute returns a new EntryAttribute with the desired key-value pair
+func NewEntryAttribute(name string, values []string) *EntryAttribute {
+ var bytes [][]byte
+ for _, value := range values {
+ bytes = append(bytes, []byte(value))
+ }
+ return &EntryAttribute{
+ Name: name,
+ Values: values,
+ ByteValues: bytes,
+ }
+}
+
+// EntryAttribute holds a single attribute
+type EntryAttribute struct {
+ // Name is the name of the attribute
+ Name string
+ // Values contain the string values of the attribute
+ Values []string
+ // ByteValues contain the raw values of the attribute
+ ByteValues [][]byte
+}
+
+// Print outputs a human-readable description
+func (e *EntryAttribute) Print() {
+ fmt.Printf("%s: %s\n", e.Name, e.Values)
+}
+
+// PrettyPrint outputs a human-readable description with indenting
+func (e *EntryAttribute) PrettyPrint(indent int) {
+ fmt.Printf("%s%s: %s\n", strings.Repeat(" ", indent), e.Name, e.Values)
+}
+
+// SearchResult holds the server's response to a search request
+type SearchResult struct {
+ // Entries are the returned entries
+ Entries []*Entry
+ // Referrals are the returned referrals
+ Referrals []string
+ // Controls are the returned controls
+ Controls []Control
+}
+
+// Print outputs a human-readable description
+func (s *SearchResult) Print() {
+ for _, entry := range s.Entries {
+ entry.Print()
+ }
+}
+
+// PrettyPrint outputs a human-readable description with indenting
+func (s *SearchResult) PrettyPrint(indent int) {
+ for _, entry := range s.Entries {
+ entry.PrettyPrint(indent)
+ }
+}
+
+// SearchRequest represents a search request to send to the server
+type SearchRequest struct {
+ BaseDN string
+ Scope int
+ DerefAliases int
+ SizeLimit int
+ TimeLimit int
+ TypesOnly bool
+ Filter string
+ Attributes []string
+ Controls []Control
+}
+
+func (s *SearchRequest) encode() (*ber.Packet, error) {
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationSearchRequest, nil, "Search Request")
+ request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, s.BaseDN, "Base DN"))
+ request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(s.Scope), "Scope"))
+ request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(s.DerefAliases), "Deref Aliases"))
+ request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(s.SizeLimit), "Size Limit"))
+ request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(s.TimeLimit), "Time Limit"))
+ request.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, s.TypesOnly, "Types Only"))
+ // compile and encode filter
+ filterPacket, err := CompileFilter(s.Filter)
+ if err != nil {
+ return nil, err
+ }
+ request.AppendChild(filterPacket)
+ // encode attributes
+ attributesPacket := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes")
+ for _, attribute := range s.Attributes {
+ attributesPacket.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
+ }
+ request.AppendChild(attributesPacket)
+ return request, nil
+}
+
+// NewSearchRequest creates a new search request
+func NewSearchRequest(
+ BaseDN string,
+ Scope, DerefAliases, SizeLimit, TimeLimit int,
+ TypesOnly bool,
+ Filter string,
+ Attributes []string,
+ Controls []Control,
+) *SearchRequest {
+ return &SearchRequest{
+ BaseDN: BaseDN,
+ Scope: Scope,
+ DerefAliases: DerefAliases,
+ SizeLimit: SizeLimit,
+ TimeLimit: TimeLimit,
+ TypesOnly: TypesOnly,
+ Filter: Filter,
+ Attributes: Attributes,
+ Controls: Controls,
+ }
+}
+
+// SearchWithPaging accepts a search request and desired page size in order to execute LDAP queries to fulfill the
+// search request. All paged LDAP query responses will be buffered and the final result will be returned atomically.
+// The following four cases are possible given the arguments:
+// - given SearchRequest missing a control of type ControlTypePaging: we will add one with the desired paging size
+// - given SearchRequest contains a control of type ControlTypePaging that isn't actually a ControlPaging: fail without issuing any queries
+// - given SearchRequest contains a control of type ControlTypePaging with pagingSize equal to the size requested: no change to the search request
+// - given SearchRequest contains a control of type ControlTypePaging with pagingSize not equal to the size requested: fail without issuing any queries
+// A requested pagingSize of 0 is interpreted as no limit by LDAP servers.
+func (l *Conn) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) {
+ var pagingControl *ControlPaging
+
+ control := FindControl(searchRequest.Controls, ControlTypePaging)
+ if control == nil {
+ pagingControl = NewControlPaging(pagingSize)
+ searchRequest.Controls = append(searchRequest.Controls, pagingControl)
+ } else {
+ castControl, ok := control.(*ControlPaging)
+ if !ok {
+ return nil, fmt.Errorf("Expected paging control to be of type *ControlPaging, got %v", control)
+ }
+ if castControl.PagingSize != pagingSize {
+ return nil, fmt.Errorf("Paging size given in search request (%d) conflicts with size given in search call (%d)", castControl.PagingSize, pagingSize)
+ }
+ pagingControl = castControl
+ }
+
+ searchResult := new(SearchResult)
+ for {
+ result, err := l.Search(searchRequest)
+ l.Debug.Printf("Looking for Paging Control...")
+ if err != nil {
+ return searchResult, err
+ }
+ if result == nil {
+ return searchResult, NewError(ErrorNetwork, errors.New("ldap: packet not received"))
+ }
+
+ for _, entry := range result.Entries {
+ searchResult.Entries = append(searchResult.Entries, entry)
+ }
+ for _, referral := range result.Referrals {
+ searchResult.Referrals = append(searchResult.Referrals, referral)
+ }
+ for _, control := range result.Controls {
+ searchResult.Controls = append(searchResult.Controls, control)
+ }
+
+ l.Debug.Printf("Looking for Paging Control...")
+ pagingResult := FindControl(result.Controls, ControlTypePaging)
+ if pagingResult == nil {
+ pagingControl = nil
+ l.Debug.Printf("Could not find paging control. Breaking...")
+ break
+ }
+
+ cookie := pagingResult.(*ControlPaging).Cookie
+ if len(cookie) == 0 {
+ pagingControl = nil
+ l.Debug.Printf("Could not find cookie. Breaking...")
+ break
+ }
+ pagingControl.SetCookie(cookie)
+ }
+
+ if pagingControl != nil {
+ l.Debug.Printf("Abandoning Paging...")
+ pagingControl.PagingSize = 0
+ l.Search(searchRequest)
+ }
+
+ return searchResult, nil
+}
+
+// Search performs the given search request
+func (l *Conn) Search(searchRequest *SearchRequest) (*SearchResult, error) {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+ // encode search request
+ encodedSearchRequest, err := searchRequest.encode()
+ if err != nil {
+ return nil, err
+ }
+ packet.AppendChild(encodedSearchRequest)
+ // encode search controls
+ if searchRequest.Controls != nil {
+ packet.AppendChild(encodeControls(searchRequest.Controls))
+ }
+
+ l.Debug.PrintPacket(packet)
+
+ msgCtx, err := l.sendMessage(packet)
+ if err != nil {
+ return nil, err
+ }
+ defer l.finishMessage(msgCtx)
+
+ result := &SearchResult{
+ Entries: make([]*Entry, 0),
+ Referrals: make([]string, 0),
+ Controls: make([]Control, 0)}
+
+ foundSearchResultDone := false
+ for !foundSearchResultDone {
+ l.Debug.Printf("%d: waiting for response", msgCtx.id)
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return nil, err
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ return nil, err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ switch packet.Children[1].Tag {
+ case 4:
+ entry := new(Entry)
+ entry.DN = packet.Children[1].Children[0].Value.(string)
+ for _, child := range packet.Children[1].Children[1].Children {
+ attr := new(EntryAttribute)
+ attr.Name = child.Children[0].Value.(string)
+ for _, value := range child.Children[1].Children {
+ attr.Values = append(attr.Values, value.Value.(string))
+ attr.ByteValues = append(attr.ByteValues, value.ByteValue)
+ }
+ entry.Attributes = append(entry.Attributes, attr)
+ }
+ result.Entries = append(result.Entries, entry)
+ case 5:
+ resultCode, resultDescription := getLDAPResultCode(packet)
+ if resultCode != 0 {
+ return result, NewError(resultCode, errors.New(resultDescription))
+ }
+ if len(packet.Children) == 3 {
+ for _, child := range packet.Children[2].Children {
+ result.Controls = append(result.Controls, DecodeControl(child))
+ }
+ }
+ foundSearchResultDone = true
+ case 19:
+ result.Referrals = append(result.Referrals, packet.Children[1].Children[0].Value.(string))
+ }
+ }
+ l.Debug.Printf("%d: returning", msgCtx.id)
+ return result, nil
+}
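
SearchWithPaging above takes care of attaching the simple paged results control (RFC 2696), following the cookie across pages and abandoning the paging session if it stops early, so callers only build a normal SearchRequest and pick a page size. A sketch of that follows, again assuming an already bound *ldap.Conn, the github.com/go-ldap/ldap import path and an illustrative filter and attribute list.

    package ldapexample

    import (
        "fmt"

        "github.com/go-ldap/ldap" // assumed import path for this vendored copy
    )

    // listUsers pages through every matching entry under baseDN in chunks of 500,
    // letting SearchWithPaging manage the paging control and its cookie.
    func listUsers(l *ldap.Conn, baseDN string) error {
        req := ldap.NewSearchRequest(
            baseDN,
            ldap.ScopeWholeSubtree, ldap.NeverDerefAliases,
            0, 0, // no client-side size or time limit
            false,
            "(objectClass=inetOrgPerson)",
            []string{"uid", "mail"},
            nil,
        )
        result, err := l.SearchWithPaging(req, 500)
        if err != nil {
            return err
        }
        for _, entry := range result.Entries {
            fmt.Printf("%s: %s\n", entry.GetAttributeValue("uid"), entry.GetAttributeValue("mail"))
        }
        return nil
    }
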
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/LICENSE.txt
new file mode 100644
index 00000000..ead98cf0
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/LICENSE.txt
@@ -0,0 +1,897 @@
+Mattermost Licensing
+
+SOFTWARE LICENSING
+
+You are licensed to use compiled versions of the Mattermost platform produced by Mattermost, Inc. under an MIT LICENSE
+
+- See MIT-COMPILED-LICENSE.md included in compiled versions for details
+
+You may be licensed to use source code to create compiled versions not produced by Mattermost, Inc. in one of two ways:
+
+1. Under the Free Software Foundation’s GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or
+2. Under a commercial license available from Mattermost, Inc. by contacting commercial@mattermost.com
+
+You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/, model/,
+webapp/client, webapp/fonts, webapp/i18n, webapp/images and all subdirectories thereof) under the Apache License v2.0.
+
+We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not
+link to the Mattermost Platform directly, but exclusively uses the Mattermost Admin Tools and Configuration Files, and
+(b) you have not modified, added to or adapted the source code of Mattermost in a way that results in the creation of
+a “modified version” or “work based on” Mattermost as these terms are defined in the AGPL v3.0 license.
+
+MATTERMOST TRADEMARK GUIDELINES
+
+Your use of the mark Mattermost is subject to Mattermost, Inc's prior written approval and our organization’s Trademark
+Standards of Use at http://www.mattermost.org/trademark-standards-of-use/. For trademark approval or any questions
+you have about using these trademarks, please email trademark@mattermost.com
+
+------------------------------------------------------------------------------------------------------------------------------
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+------------------------------------------------------------------------------
+
+The software is released under the terms of the GNU Affero General Public
+License, version 3.
+
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/dce.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/dce.go
new file mode 100644
index 00000000..50a0f2d0
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/dce.go
@@ -0,0 +1,84 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "fmt"
+ "os"
+)
+
+// A Domain represents a Version 2 domain
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+ Person = Domain(0)
+ Group = Domain(1)
+ Org = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group. The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) UUID {
+ uuid := NewUUID()
+ if uuid != nil {
+ uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+ uuid[9] = byte(domain)
+ binary.BigEndian.PutUint32(uuid[0:], id)
+ }
+ return uuid
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+// NewDCESecurity(Person, uint32(os.Getuid()))
+func NewDCEPerson() UUID {
+ return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+// NewDCESecurity(Group, uint32(os.Getgid()))
+func NewDCEGroup() UUID {
+ return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID or false.
+func (uuid UUID) Domain() (Domain, bool) {
+ if v, _ := uuid.Version(); v != 2 {
+ return 0, false
+ }
+ return Domain(uuid[9]), true
+}
+
+// Id returns the id for a Version 2 UUID or false.
+func (uuid UUID) Id() (uint32, bool) {
+ if v, _ := uuid.Version(); v != 2 {
+ return 0, false
+ }
+ return binary.BigEndian.Uint32(uuid[0:4]), true
+}
+
+func (d Domain) String() string {
+ switch d {
+ case Person:
+ return "Person"
+ case Group:
+ return "Group"
+ case Org:
+ return "Org"
+ }
+ return fmt.Sprintf("Domain%d", int(d))
+}
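
A minimal usage sketch for the DCE Security helpers above (illustrative only, not part of the vendored file, assuming the upstream import path github.com/pborman/uuid):

    package main

    import (
        "fmt"

        "github.com/pborman/uuid"
    )

    func main() {
        // NewDCEPerson embeds os.Getuid() in a Version 2 UUID.
        id := uuid.NewDCEPerson()
        if id == nil {
            fmt.Println("could not generate a DCE Security UUID")
            return
        }
        domain, _ := id.Domain() // Person
        uid, _ := id.Id()        // the numeric id stored in bytes 0-3
        fmt.Println(id, domain, uid)
    }
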
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/doc.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/doc.go
new file mode 100644
index 00000000..d8bd013e
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The uuid package generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services.
+package uuid
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/hash.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/hash.go
new file mode 100644
index 00000000..a0420c1e
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/hash.go
@@ -0,0 +1,53 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "crypto/md5"
+ "crypto/sha1"
+ "hash"
+)
+
+// Well known Name Space IDs and UUIDs
+var (
+ NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+ NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
+ NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
+ NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+ NIL = Parse("00000000-0000-0000-0000-000000000000")
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h. The hash should be at least 16 bytes in length. The
+// first 16 bytes of the hash are used to form the UUID. The version of the
+// UUID will be the lower 4 bits of version. NewHash is used to implement
+// NewMD5 and NewSHA1.
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
+ h.Reset()
+ h.Write(space)
+ h.Write([]byte(data))
+ s := h.Sum(nil)
+ uuid := make([]byte, 16)
+ copy(uuid, s)
+ uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
+ return uuid
+}
+
+// NewMD5 returns a new MD5 (Version 3) UUID based on the
+// supplied name space and data.
+//
+// NewHash(md5.New(), space, data, 3)
+func NewMD5(space UUID, data []byte) UUID {
+ return NewHash(md5.New(), space, data, 3)
+}
+
+// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
+// supplied name space and data.
+//
+// NewHash(sha1.New(), space, data, 5)
+func NewSHA1(space UUID, data []byte) UUID {
+ return NewHash(sha1.New(), space, data, 5)
+}
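
A short sketch of name-based UUIDs built on the helpers above (illustrative only, not part of the vendored file, assuming the upstream import path github.com/pborman/uuid):

    package main

    import (
        "fmt"

        "github.com/pborman/uuid"
    )

    func main() {
        // Version 5 (SHA-1) and Version 3 (MD5) UUIDs are deterministic:
        // the same namespace and name always yield the same UUID.
        a := uuid.NewSHA1(uuid.NameSpace_DNS, []byte("example.com"))
        b := uuid.NewMD5(uuid.NameSpace_URL, []byte("https://example.com/"))
        fmt.Println(a)
        fmt.Println(b)
    }
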
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/marshal.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/marshal.go
new file mode 100644
index 00000000..6621dd54
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/marshal.go
@@ -0,0 +1,83 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "errors"
+ "fmt"
+)
+
+// MarshalText implements encoding.TextMarshaler.
+func (u UUID) MarshalText() ([]byte, error) {
+ if len(u) != 16 {
+ return nil, nil
+ }
+ var js [36]byte
+ encodeHex(js[:], u)
+ return js[:], nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (u *UUID) UnmarshalText(data []byte) error {
+ if len(data) == 0 {
+ return nil
+ }
+ id := Parse(string(data))
+ if id == nil {
+ return errors.New("invalid UUID")
+ }
+ *u = id
+ return nil
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (u UUID) MarshalBinary() ([]byte, error) {
+ return u[:], nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (u *UUID) UnmarshalBinary(data []byte) error {
+ if len(data) == 0 {
+ return nil
+ }
+ if len(data) != 16 {
+ return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+ }
+ var id [16]byte
+ copy(id[:], data)
+ *u = id[:]
+ return nil
+}
+
+// MarshalText implements encoding.TextMarshaler.
+func (u Array) MarshalText() ([]byte, error) {
+ var js [36]byte
+ encodeHex(js[:], u[:])
+ return js[:], nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (u *Array) UnmarshalText(data []byte) error {
+ id := Parse(string(data))
+ if id == nil {
+ return errors.New("invalid UUID")
+ }
+ *u = id.Array()
+ return nil
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (u Array) MarshalBinary() ([]byte, error) {
+ return u[:], nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (u *Array) UnmarshalBinary(data []byte) error {
+ if len(data) != 16 {
+ return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+ }
+ copy(u[:], data)
+ return nil
+}
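
A sketch of the text round trip provided by the marshaling methods above, exercised through encoding/json (illustrative only, not part of the vendored file, assuming the upstream import path github.com/pborman/uuid):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/pborman/uuid"
    )

    type record struct {
        ID uuid.UUID `json:"id"`
    }

    func main() {
        in := record{ID: uuid.NewRandom()}

        data, err := json.Marshal(in) // uses UUID.MarshalText
        if err != nil {
            panic(err)
        }

        var out record
        if err := json.Unmarshal(data, &out); err != nil { // uses (*UUID).UnmarshalText
            panic(err)
        }
        fmt.Println(string(data), uuid.Equal(in.ID, out.ID))
    }
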
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/node.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/node.go
new file mode 100644
index 00000000..42d60da8
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/node.go
@@ -0,0 +1,117 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "net"
+ "sync"
+)
+
+var (
+ nodeMu sync.Mutex
+ interfaces []net.Interface // cached list of interfaces
+ ifname string // name of interface being used
+ nodeID []byte // hardware for version 1 UUIDs
+)
+
+// NodeInterface returns the name of the interface from which the NodeID was
+// derived. The interface "user" is returned if the NodeID was set by
+// SetNodeID.
+func NodeInterface() string {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ return ifname
+}
+
+// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
+// If name is "" then the first usable interface found will be used or a random
+// Node ID will be generated. If a named interface cannot be found then false
+// is returned.
+//
+// SetNodeInterface never fails when name is "".
+func SetNodeInterface(name string) bool {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ return setNodeInterface(name)
+}
+
+func setNodeInterface(name string) bool {
+ if interfaces == nil {
+ var err error
+ interfaces, err = net.Interfaces()
+ if err != nil && name != "" {
+ return false
+ }
+ }
+
+ for _, ifs := range interfaces {
+ if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
+ if setNodeID(ifs.HardwareAddr) {
+ ifname = ifs.Name
+ return true
+ }
+ }
+ }
+
+ // We found no interfaces with a valid hardware address. If name
+ // does not specify a specific interface generate a random Node ID
+ // (section 4.1.6)
+ if name == "" {
+ if nodeID == nil {
+ nodeID = make([]byte, 6)
+ }
+ randomBits(nodeID)
+ return true
+ }
+ return false
+}
+
+// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
+// if not already set.
+func NodeID() []byte {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ if nodeID == nil {
+ setNodeInterface("")
+ }
+ nid := make([]byte, 6)
+ copy(nid, nodeID)
+ return nid
+}
+
+// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
+// of id are used. If id is less than 6 bytes then false is returned and the
+// Node ID is not set.
+func SetNodeID(id []byte) bool {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ if setNodeID(id) {
+ ifname = "user"
+ return true
+ }
+ return false
+}
+
+func setNodeID(id []byte) bool {
+ if len(id) < 6 {
+ return false
+ }
+ if nodeID == nil {
+ nodeID = make([]byte, 6)
+ }
+ copy(nodeID, id)
+ return true
+}
+
+// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
+// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) NodeID() []byte {
+ if len(uuid) != 16 {
+ return nil
+ }
+ node := make([]byte, 6)
+ copy(node, uuid[10:])
+ return node
+}
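
A small sketch showing how the node identity above can be inspected or overridden (illustrative only, not part of the vendored file, assuming the upstream import path github.com/pborman/uuid):

    package main

    import (
        "fmt"

        "github.com/pborman/uuid"
    )

    func main() {
        // An empty name picks the first usable interface, falling back to a
        // random node ID when none has a hardware address.
        uuid.SetNodeInterface("")
        fmt.Printf("interface=%q nodeID=%x\n", uuid.NodeInterface(), uuid.NodeID())

        // A caller may also pin the node ID explicitly (the first 6 bytes are used).
        uuid.SetNodeID([]byte{0x02, 0x00, 0x5e, 0x10, 0x00, 0x01})
        fmt.Println(uuid.NodeInterface()) // "user"
    }
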
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/sql.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/sql.go
new file mode 100644
index 00000000..d015bfd1
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/sql.go
@@ -0,0 +1,66 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "database/sql/driver"
+ "errors"
+ "fmt"
+)
+
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently
+// Currently, database types that map to string and []byte are supported. Please
+// consult database-specific driver documentation for matching types.
+func (uuid *UUID) Scan(src interface{}) error {
+ switch src.(type) {
+ case string:
+ // if an empty UUID comes from a table, we return a null UUID
+ if src.(string) == "" {
+ return nil
+ }
+
+ // see uuid.Parse for required string format
+ parsed := Parse(src.(string))
+
+ if parsed == nil {
+ return errors.New("Scan: invalid UUID format")
+ }
+
+ *uuid = parsed
+ case []byte:
+ b := src.([]byte)
+
+ // if an empty UUID comes from a table, we return a null UUID
+ if len(b) == 0 {
+ return nil
+ }
+
+ // assumes a simple slice of bytes if 16 bytes
+ // otherwise attempts to parse
+ if len(b) == 16 {
+ *uuid = UUID(b)
+ } else {
+ u := Parse(string(b))
+
+ if u == nil {
+ return errors.New("Scan: invalid UUID format")
+ }
+
+ *uuid = u
+ }
+
+ default:
+ return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
+ }
+
+ return nil
+}
+
+// Value implements driver.Valuer so that UUIDs can be written to databases
+// transparently. Currently, UUIDs map to strings. Please consult
+// database-specific driver documentation for matching types.
+func (uuid UUID) Value() (driver.Value, error) {
+ return uuid.String(), nil
+}
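
A sketch of the database round trip implemented above, exercised without an actual driver (illustrative only, not part of the vendored file, assuming the upstream import path github.com/pborman/uuid):

    package main

    import (
        "fmt"

        "github.com/pborman/uuid"
    )

    func main() {
        original := uuid.NewRandom()

        // Value is what database/sql hands to the driver: the string form.
        v, err := original.Value()
        if err != nil {
            panic(err)
        }

        // Scan accepts the string or []byte forms a driver might return.
        var scanned uuid.UUID
        if err := scanned.Scan(v); err != nil {
            panic(err)
        }
        fmt.Println(v, uuid.Equal(original, scanned))
    }
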
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/time.go
new file mode 100644
index 00000000..eedf2421
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/time.go
@@ -0,0 +1,132 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "sync"
+ "time"
+)
+
+// A Time represents a time as the number of 100-nanosecond intervals since
+// 15 Oct 1582.
+type Time int64
+
+const (
+ lillian = 2299160 // Julian day of 15 Oct 1582
+ unix = 2440587 // Julian day of 1 Jan 1970
+ epoch = unix - lillian // Days between epochs
+ g1582 = epoch * 86400 // seconds between epochs
+ g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs
+)
+
+var (
+ timeMu sync.Mutex
+ lasttime uint64 // last time we returned
+ clock_seq uint16 // clock sequence for this run
+
+ timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds using the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+ sec = int64(t - g1582ns100)
+ nsec = (sec % 10000000) * 100
+ sec /= 10000000
+ return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed. An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+ t := timeNow()
+
+ // If we don't have a clock sequence already, set one.
+ if clock_seq == 0 {
+ setClockSequence(-1)
+ }
+ now := uint64(t.UnixNano()/100) + g1582ns100
+
+ // If time has gone backwards with this clock sequence then we
+ // increment the clock sequence
+ if now <= lasttime {
+ clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
+ }
+ lasttime = now
+ return Time(now), clock_seq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set. The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated. Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID.
+// (section 4.2.1.1)
+func ClockSequence() int {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ return clockSequence()
+}
+
+func clockSequence() int {
+ if clock_seq == 0 {
+ setClockSequence(-1)
+ }
+ return int(clock_seq & 0x3fff)
+}
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
+// -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+ if seq == -1 {
+ var b [2]byte
+ randomBits(b[:]) // clock sequence
+ seq = int(b[0])<<8 | int(b[1])
+ }
+ old_seq := clock_seq
+ clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+ if old_seq != clock_seq {
+ lasttime = 0
+ }
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid. It returns false if uuid is not valid. The time is only well defined
+// for version 1 and 2 UUIDs.
+func (uuid UUID) Time() (Time, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+ time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+ time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+ return Time(time), true
+}
+
+// ClockSequence returns the clock sequence encoded in uuid. It returns false
+// if uuid is not valid. The clock sequence is only well defined for version 1
+// and 2 UUIDs.
+func (uuid UUID) ClockSequence() (int, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
+}
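
A sketch recovering the timestamp and clock sequence embedded in a Version 1 UUID via the helpers above (illustrative only, not part of the vendored file, assuming the upstream import path github.com/pborman/uuid):

    package main

    import (
        "fmt"
        "time"

        "github.com/pborman/uuid"
    )

    func main() {
        id := uuid.NewUUID()
        if id == nil {
            fmt.Println("could not generate a Version 1 UUID")
            return
        }

        t, _ := id.Time()         // 100ns intervals since 15 Oct 1582
        sec, nsec := t.UnixTime() // converted to the Unix epoch
        seq, _ := id.ClockSequence()
        fmt.Println(time.Unix(sec, nsec).UTC(), seq)
    }
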
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/util.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/util.go
new file mode 100644
index 00000000..fc8e052c
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/util.go
@@ -0,0 +1,43 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+ if _, err := io.ReadFull(rander, b); err != nil {
+ panic(err.Error()) // rand should never fail
+ }
+}
+
+// xvalues returns the value of a byte as a hexadecimal digit or 255.
+var xvalues = [256]byte{
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts the first two hex bytes of x into a byte.
+func xtob(x string) (byte, bool) {
+ b1 := xvalues[x[0]]
+ b2 := xvalues[x[1]]
+ return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/uuid.go
new file mode 100644
index 00000000..7c643cf0
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/uuid.go
@@ -0,0 +1,201 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// Array is a pass-by-value UUID that can be used as an efficient key in a map.
+type Array [16]byte
+
+// UUID converts uuid into a slice.
+func (uuid Array) UUID() UUID {
+ return uuid[:]
+}
+
+// String returns the string representation of uuid,
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
+func (uuid Array) String() string {
+ return uuid.UUID().String()
+}
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID []byte
+
+// A Version represents a UUID's version.
+type Version byte
+
+// A Variant represents a UUID's variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+ Invalid = Variant(iota) // Invalid UUID
+ RFC4122 // The variant specified in RFC4122
+ Reserved // Reserved, NCS backward compatibility.
+ Microsoft // Reserved, Microsoft Corporation backward compatibility.
+ Future // Reserved for future definition.
+)
+
+var rander = rand.Reader // random function
+
+// New returns a new random (version 4) UUID as a string. It is a convenience
+// function for NewRandom().String().
+func New() string {
+ return NewRandom().String()
+}
+
+// Parse decodes s into a UUID or returns nil. Both the UUID form of
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
+func Parse(s string) UUID {
+ if len(s) == 36+9 {
+ if strings.ToLower(s[:9]) != "urn:uuid:" {
+ return nil
+ }
+ s = s[9:]
+ } else if len(s) != 36 {
+ return nil
+ }
+ if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+ return nil
+ }
+ var uuid [16]byte
+ for i, x := range [16]int{
+ 0, 2, 4, 6,
+ 9, 11,
+ 14, 16,
+ 19, 21,
+ 24, 26, 28, 30, 32, 34} {
+ if v, ok := xtob(s[x:]); !ok {
+ return nil
+ } else {
+ uuid[i] = v
+ }
+ }
+ return uuid[:]
+}
+
+// Equal returns true if uuid1 and uuid2 are equal.
+func Equal(uuid1, uuid2 UUID) bool {
+ return bytes.Equal(uuid1, uuid2)
+}
+
+// Array returns an array representation of uuid that can be used as a map key.
+// Array panics if uuid is not valid.
+func (uuid UUID) Array() Array {
+ if len(uuid) != 16 {
+ panic("invalid uuid")
+ }
+ var a Array
+ copy(a[:], uuid)
+ return a
+}
+
+// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,
+// or "" if uuid is invalid.
+func (uuid UUID) String() string {
+ if len(uuid) != 16 {
+ return ""
+ }
+ var buf [36]byte
+ encodeHex(buf[:], uuid)
+ return string(buf[:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+ if len(uuid) != 16 {
+ return ""
+ }
+ var buf [36 + 9]byte
+ copy(buf[:], "urn:uuid:")
+ encodeHex(buf[9:], uuid)
+ return string(buf[:])
+}
+
+func encodeHex(dst []byte, uuid UUID) {
+ hex.Encode(dst[:], uuid[:4])
+ dst[8] = '-'
+ hex.Encode(dst[9:13], uuid[4:6])
+ dst[13] = '-'
+ hex.Encode(dst[14:18], uuid[6:8])
+ dst[18] = '-'
+ hex.Encode(dst[19:23], uuid[8:10])
+ dst[23] = '-'
+ hex.Encode(dst[24:], uuid[10:])
+}
+
+// Variant returns the variant encoded in uuid. It returns Invalid if
+// uuid is invalid.
+func (uuid UUID) Variant() Variant {
+ if len(uuid) != 16 {
+ return Invalid
+ }
+ switch {
+ case (uuid[8] & 0xc0) == 0x80:
+ return RFC4122
+ case (uuid[8] & 0xe0) == 0xc0:
+ return Microsoft
+ case (uuid[8] & 0xe0) == 0xe0:
+ return Future
+ default:
+ return Reserved
+ }
+}
+
+// Version returns the version of uuid. It returns false if uuid is not
+// valid.
+func (uuid UUID) Version() (Version, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ return Version(uuid[6] >> 4), true
+}
+
+func (v Version) String() string {
+ if v > 15 {
+ return fmt.Sprintf("BAD_VERSION_%d", v)
+ }
+ return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+ switch v {
+ case RFC4122:
+ return "RFC4122"
+ case Reserved:
+ return "Reserved"
+ case Microsoft:
+ return "Microsoft"
+ case Future:
+ return "Future"
+ case Invalid:
+ return "Invalid"
+ }
+ return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+ if r == nil {
+ rander = rand.Reader
+ return
+ }
+ rander = r
+}
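
A sketch of parsing and inspecting UUIDs with the core API above (illustrative only, not part of the vendored file, assuming the upstream import path github.com/pborman/uuid):

    package main

    import (
        "fmt"

        "github.com/pborman/uuid"
    )

    func main() {
        id := uuid.Parse("urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8")
        if id == nil {
            fmt.Println("not a valid UUID")
            return
        }
        version, _ := id.Version()
        fmt.Println(id.String(), version, id.Variant()) // ... VERSION_1 RFC4122

        // Array gives a comparable value suitable for use as a map key.
        seen := map[uuid.Array]bool{id.Array(): true}
        fmt.Println(seen[id.Array()])
    }
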
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/version1.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/version1.go
new file mode 100644
index 00000000..0127eacf
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/version1.go
@@ -0,0 +1,41 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set NewUUID returns nil. If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns nil.
+func NewUUID() UUID {
+ if nodeID == nil {
+ SetNodeInterface("")
+ }
+
+ now, seq, err := GetTime()
+ if err != nil {
+ return nil
+ }
+
+ uuid := make([]byte, 16)
+
+ time_low := uint32(now & 0xffffffff)
+ time_mid := uint16((now >> 32) & 0xffff)
+ time_hi := uint16((now >> 48) & 0x0fff)
+ time_hi |= 0x1000 // Version 1
+
+ binary.BigEndian.PutUint32(uuid[0:], time_low)
+ binary.BigEndian.PutUint16(uuid[4:], time_mid)
+ binary.BigEndian.PutUint16(uuid[6:], time_hi)
+ binary.BigEndian.PutUint16(uuid[8:], seq)
+ copy(uuid[10:], nodeID)
+
+ return uuid
+}
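
A sketch of time-based UUID generation using NewUUID above (illustrative only, not part of the vendored file, assuming the upstream import path github.com/pborman/uuid):

    package main

    import (
        "fmt"

        "github.com/pborman/uuid"
    )

    func main() {
        id := uuid.NewUUID()
        if id == nil {
            fmt.Println("node ID or current time unavailable")
            return
        }
        // Bytes 10-15 carry the node ID chosen by SetNodeInterface/SetNodeID.
        fmt.Printf("%s node=%x\n", id, id.NodeID())
    }
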
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/version4.go
new file mode 100644
index 00000000..b3d4a368
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/version4.go
@@ -0,0 +1,25 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+// NewRandom returns a Random (Version 4) UUID or panics.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+// Randomly generated UUIDs have 122 random bits. One's annual risk of being
+// hit by a meteorite is estimated to be one chance in 17 billion, which
+// means the probability is about 0.00000000006 (6 × 10^-11),
+// equivalent to the odds of creating a few tens of trillions of UUIDs in a
+// year and having one duplicate.
+func NewRandom() UUID {
+ uuid := make([]byte, 16)
+ randomBits([]byte(uuid))
+ uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+ return uuid
+}
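
A sketch of random UUID generation using the helpers above (illustrative only, not part of the vendored file, assuming the upstream import path github.com/pborman/uuid):

    package main

    import (
        "fmt"

        "github.com/pborman/uuid"
    )

    func main() {
        id := uuid.NewRandom() // 122 random bits from crypto/rand
        v, _ := id.Version()
        fmt.Println(id, v, id.Variant()) // e.g. ... VERSION_4 RFC4122

        fmt.Println(uuid.New()) // convenience wrapper: NewRandom().String()
    }
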
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/bcrypt/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/bcrypt/LICENSE.txt
new file mode 100644
index 00000000..ead98cf0
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/bcrypt/LICENSE.txt
@@ -0,0 +1,897 @@
+Mattermost Licensing
+
+SOFTWARE LICENSING
+
+You are licensed to use compiled versions of the Mattermost platform produced by Mattermost, Inc. under an MIT LICENSE
+
+- See MIT-COMPILED-LICENSE.md included in compiled versions for details
+
+You may be licensed to use source code to create compiled versions not produced by Mattermost, Inc. in one of two ways:
+
+1. Under the Free Software Foundation’s GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or
+2. Under a commercial license available from Mattermost, Inc. by contacting commercial@mattermost.com
+
+You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/, model/,
+webapp/client, webapp/fonts, webapp/i18n, webapp/images and all subdirectories thereof) under the Apache License v2.0.
+
+We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not
+link to the Mattermost Platform directly, but exclusively uses the Mattermost Admin Tools and Configuration Files, and
+(b) you have not modified, added to or adapted the source code of Mattermost in a way that results in the creation of
+a “modified version” or “work based on” Mattermost as these terms are defined in the AGPL v3.0 license.
+
+MATTERMOST TRADEMARK GUIDELINES
+
+Your use of the mark Mattermost is subject to Mattermost, Inc's prior written approval and our organization’s Trademark
+Standards of Use at http://www.mattermost.org/trademark-standards-of-use/. For trademark approval or any questions
+you have about using these trademarks, please email trademark@mattermost.com
+
+------------------------------------------------------------------------------------------------------------------------------
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+------------------------------------------------------------------------------
+
+The software is released under the terms of the GNU Affero General Public
+License, version 3.
+
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/bcrypt/base64.go
new file mode 100644
index 00000000..fc311609
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/bcrypt/base64.go
@@ -0,0 +1,35 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bcrypt
+
+import "encoding/base64"
+
+const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+
+var bcEncoding = base64.NewEncoding(alphabet)
+
+func base64Encode(src []byte) []byte {
+ n := bcEncoding.EncodedLen(len(src))
+ dst := make([]byte, n)
+ bcEncoding.Encode(dst, src)
+ for dst[n-1] == '=' {
+ n--
+ }
+ return dst[:n]
+}
+
+func base64Decode(src []byte) ([]byte, error) {
+ numOfEquals := 4 - (len(src) % 4)
+ for i := 0; i < numOfEquals; i++ {
+ src = append(src, '=')
+ }
+
+ dst := make([]byte, bcEncoding.DecodedLen(len(src)))
+ n, err := bcEncoding.Decode(dst, src)
+ if err != nil {
+ return nil, err
+ }
+ return dst[:n], nil
+}
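
The two helpers above implement bcrypt's non-standard base64 flavour: the "./A-Za-z0-9"
alphabet with the '=' padding stripped on encode and re-appended before decode. They are
unexported, so the round-trip sketch below is illustrative only, not part of this patch;
it assumes it is compiled into the same bcrypt package, with "bytes" and "crypto/rand"
imported, and uses a 16-byte buffer because that is the salt size the package actually
encodes.

    // saltRoundTrip checks that base64Decode recovers exactly the 16 random
    // bytes passed to base64Encode (the same shape of data bcrypt salts use).
    func saltRoundTrip() (bool, error) {
        salt := make([]byte, 16)
        if _, err := rand.Read(salt); err != nil {
            return false, err
        }
        enc := base64Encode(salt)     // 22 chars, bcrypt alphabet, no '=' padding
        dec, err := base64Decode(enc) // two '=' bytes are re-appended internally
        if err != nil {
            return false, err
        }
        return bytes.Equal(dec, salt), nil
    }
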
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/bcrypt/bcrypt.go
new file mode 100644
index 00000000..aeb73f81
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/bcrypt/bcrypt.go
@@ -0,0 +1,295 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing
+// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf
+package bcrypt // import "golang.org/x/crypto/bcrypt"
+
+// The code is a port of Provos and Mazières's C implementation.
+import (
+ "crypto/rand"
+ "crypto/subtle"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+
+ "golang.org/x/crypto/blowfish"
+)
+
+const (
+ MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword
+ MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword
+ DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword
+)
+
+// The error returned from CompareHashAndPassword when a password and hash do
+// not match.
+var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password")
+
+// The error returned from CompareHashAndPassword when a hash is too short to
+// be a bcrypt hash.
+var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password")
+
+// The error returned from CompareHashAndPassword when a hash was created with
+// a bcrypt algorithm newer than this implementation.
+type HashVersionTooNewError byte
+
+func (hv HashVersionTooNewError) Error() string {
+ return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion)
+}
+
+// The error returned from CompareHashAndPassword when a hash starts with something other than '$'
+type InvalidHashPrefixError byte
+
+func (ih InvalidHashPrefixError) Error() string {
+ return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih))
+}
+
+type InvalidCostError int
+
+func (ic InvalidCostError) Error() string {
+ return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost))
+}
+
+const (
+ majorVersion = '2'
+ minorVersion = 'a'
+ maxSaltSize = 16
+ maxCryptedHashSize = 23
+ encodedSaltSize = 22
+ encodedHashSize = 31
+ minHashSize = 59
+)
+
+// magicCipherData is an IV for the 64 Blowfish encryption calls in
+// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes.
+var magicCipherData = []byte{
+ 0x4f, 0x72, 0x70, 0x68,
+ 0x65, 0x61, 0x6e, 0x42,
+ 0x65, 0x68, 0x6f, 0x6c,
+ 0x64, 0x65, 0x72, 0x53,
+ 0x63, 0x72, 0x79, 0x44,
+ 0x6f, 0x75, 0x62, 0x74,
+}
+
+type hashed struct {
+ hash []byte
+ salt []byte
+ cost int // allowed range is MinCost to MaxCost
+ major byte
+ minor byte
+}
+
+// GenerateFromPassword returns the bcrypt hash of the password at the given
+// cost. If the cost given is less than MinCost, the cost will be set to
+// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package,
+// to compare the returned hashed password with its cleartext version.
+func GenerateFromPassword(password []byte, cost int) ([]byte, error) {
+ p, err := newFromPassword(password, cost)
+ if err != nil {
+ return nil, err
+ }
+ return p.Hash(), nil
+}
+
+// CompareHashAndPassword compares a bcrypt hashed password with its possible
+// plaintext equivalent. Returns nil on success, or an error on failure.
+func CompareHashAndPassword(hashedPassword, password []byte) error {
+ p, err := newFromHash(hashedPassword)
+ if err != nil {
+ return err
+ }
+
+ otherHash, err := bcrypt(password, p.cost, p.salt)
+ if err != nil {
+ return err
+ }
+
+ otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor}
+ if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 {
+ return nil
+ }
+
+ return ErrMismatchedHashAndPassword
+}
+
+// Cost returns the hashing cost used to create the given hashed
+// password. When, in the future, the hashing cost of a password system needs
+// to be increased in order to adjust for greater computational power, this
+// function allows one to establish which passwords need to be updated.
+func Cost(hashedPassword []byte) (int, error) {
+ p, err := newFromHash(hashedPassword)
+ if err != nil {
+ return 0, err
+ }
+ return p.cost, nil
+}
+
+func newFromPassword(password []byte, cost int) (*hashed, error) {
+ if cost < MinCost {
+ cost = DefaultCost
+ }
+ p := new(hashed)
+ p.major = majorVersion
+ p.minor = minorVersion
+
+ err := checkCost(cost)
+ if err != nil {
+ return nil, err
+ }
+ p.cost = cost
+
+ unencodedSalt := make([]byte, maxSaltSize)
+ _, err = io.ReadFull(rand.Reader, unencodedSalt)
+ if err != nil {
+ return nil, err
+ }
+
+ p.salt = base64Encode(unencodedSalt)
+ hash, err := bcrypt(password, p.cost, p.salt)
+ if err != nil {
+ return nil, err
+ }
+ p.hash = hash
+ return p, err
+}
+
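+// newFromHash parses an encoded hash such as "$2a$10$<22-char salt><31-char checksum>" back into its version, cost, salt and checksum fields.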
+func newFromHash(hashedSecret []byte) (*hashed, error) {
+ if len(hashedSecret) < minHashSize {
+ return nil, ErrHashTooShort
+ }
+ p := new(hashed)
+ n, err := p.decodeVersion(hashedSecret)
+ if err != nil {
+ return nil, err
+ }
+ hashedSecret = hashedSecret[n:]
+ n, err = p.decodeCost(hashedSecret)
+ if err != nil {
+ return nil, err
+ }
+ hashedSecret = hashedSecret[n:]
+
+ // The "+2" is here because we'll have to append at most 2 '=' to the salt
+ // when base64 decoding it in expensiveBlowfishSetup().
+ p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2)
+ copy(p.salt, hashedSecret[:encodedSaltSize])
+
+ hashedSecret = hashedSecret[encodedSaltSize:]
+ p.hash = make([]byte, len(hashedSecret))
+ copy(p.hash, hashedSecret)
+
+ return p, nil
+}
+
+func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) {
+ cipherData := make([]byte, len(magicCipherData))
+ copy(cipherData, magicCipherData)
+
+ c, err := expensiveBlowfishSetup(password, uint32(cost), salt)
+ if err != nil {
+ return nil, err
+ }
+
+ for i := 0; i < 24; i += 8 {
+ for j := 0; j < 64; j++ {
+ c.Encrypt(cipherData[i:i+8], cipherData[i:i+8])
+ }
+ }
+
+ // Bug compatibility with C bcrypt implementations. We only encode 23 of
+ // the 24 bytes encrypted.
+ hsh := base64Encode(cipherData[:maxCryptedHashSize])
+ return hsh, nil
+}
+
+func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {
+ csalt, err := base64Decode(salt)
+ if err != nil {
+ return nil, err
+ }
+
+ // Bug compatibility with C bcrypt implementations. They use the trailing
+ // NULL in the key string during expansion.
+ // We copy the key to prevent changing the underlying array.
+ ckey := append(key[:len(key):len(key)], 0)
+
+ c, err := blowfish.NewSaltedCipher(ckey, csalt)
+ if err != nil {
+ return nil, err
+ }
+
+ var i, rounds uint64
+ rounds = 1 << cost
+ for i = 0; i < rounds; i++ {
+ blowfish.ExpandKey(ckey, c)
+ blowfish.ExpandKey(csalt, c)
+ }
+
+ return c, nil
+}
+
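+// Hash re-assembles the fields into the standard 60-byte bcrypt string: '$', version, '$', two-digit cost, '$', 22-byte encoded salt, 31-byte encoded checksum.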
+func (p *hashed) Hash() []byte {
+ arr := make([]byte, 60)
+ arr[0] = '$'
+ arr[1] = p.major
+ n := 2
+ if p.minor != 0 {
+ arr[2] = p.minor
+ n = 3
+ }
+ arr[n] = '$'
+ n++
+ copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost)))
+ n += 2
+ arr[n] = '$'
+ n++
+ copy(arr[n:], p.salt)
+ n += encodedSaltSize
+ copy(arr[n:], p.hash)
+ n += encodedHashSize
+ return arr[:n]
+}
+
+func (p *hashed) decodeVersion(sbytes []byte) (int, error) {
+ if sbytes[0] != '$' {
+ return -1, InvalidHashPrefixError(sbytes[0])
+ }
+ if sbytes[1] > majorVersion {
+ return -1, HashVersionTooNewError(sbytes[1])
+ }
+ p.major = sbytes[1]
+ n := 3
+ if sbytes[2] != '$' {
+ p.minor = sbytes[2]
+ n++
+ }
+ return n, nil
+}
+
+// sbytes should begin where decodeVersion left off.
+func (p *hashed) decodeCost(sbytes []byte) (int, error) {
+ cost, err := strconv.Atoi(string(sbytes[0:2]))
+ if err != nil {
+ return -1, err
+ }
+ err = checkCost(cost)
+ if err != nil {
+ return -1, err
+ }
+ p.cost = cost
+ return 3, nil
+}
+
+func (p *hashed) String() string {
+ return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor)
+}
+
+func checkCost(cost int) error {
+ if cost < MinCost || cost > MaxCost {
+ return InvalidCostError(cost)
+ }
+ return nil
+}
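
The exported surface of this vendored copy matches upstream golang.org/x/crypto/bcrypt,
so calling code would typically use it along the lines sketched below. This is an
illustrative example only, not part of the patch; the password literal and the printed
messages are placeholders.

    package main

    import (
        "fmt"

        "golang.org/x/crypto/bcrypt"
    )

    func main() {
        // Hash a password; the 16-byte salt is generated internally from crypto/rand.
        hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
        if err != nil {
            panic(err)
        }

        // The hash string embeds version, cost and salt, so verification needs
        // only the stored hash and the candidate password.
        if err := bcrypt.CompareHashAndPassword(hash, []byte("s3cret")); err != nil {
            fmt.Println("password rejected:", err)
        } else {
            fmt.Println("password accepted")
        }

        // Cost recovers the work factor, e.g. to decide whether a stored hash
        // should be re-hashed at a higher cost later.
        cost, _ := bcrypt.Cost(hash)
        fmt.Println("cost:", cost)
    }
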
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/LICENSE.txt
new file mode 100644
index 00000000..ead98cf0
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/LICENSE.txt
@@ -0,0 +1,897 @@
+Mattermost Licensing
+
+SOFTWARE LICENSING
+
+You are licensed to use compiled versions of the Mattermost platform produced by Mattermost, Inc. under an MIT LICENSE
+
+- See MIT-COMPILED-LICENSE.md included in compiled versions for details
+
+You may be licensed to use source code to create compiled versions not produced by Mattermost, Inc. in one of two ways:
+
+1. Under the Free Software Foundation’s GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or
+2. Under a commercial license available from Mattermost, Inc. by contacting commercial@mattermost.com
+
+You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/, model/,
+webapp/client, webapp/fonts, webapp/i18n, webapp/images and all subdirectories thereof) under the Apache License v2.0.
+
+We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not
+link to the Mattermost Platform directly, but exclusively uses the Mattermost Admin Tools and Configuration Files, and
+(b) you have not modified, added to or adapted the source code of Mattermost in a way that results in the creation of
+a “modified version” or “work based on” Mattermost as these terms are defined in the AGPL v3.0 license.
+
+MATTERMOST TRADEMARK GUIDELINES
+
+Your use of the mark Mattermost is subject to Mattermost, Inc's prior written approval and our organization’s Trademark
+Standards of Use at http://www.mattermost.org/trademark-standards-of-use/. For trademark approval or any questions
+you have about using these trademarks, please email trademark@mattermost.com
+
+------------------------------------------------------------------------------------------------------------------------------
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+------------------------------------------------------------------------------
+
+The software is released under the terms of the GNU Affero General Public
+License, version 3.
+
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/block.go
new file mode 100644
index 00000000..9d80f195
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/block.go
@@ -0,0 +1,159 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package blowfish
+
+// getNextWord returns the next big-endian uint32 value from the byte slice
+// at the given position in a circular manner, updating the position.
+func getNextWord(b []byte, pos *int) uint32 {
+ var w uint32
+ j := *pos
+ for i := 0; i < 4; i++ {
+ w = w<<8 | uint32(b[j])
+ j++
+ if j >= len(b) {
+ j = 0
+ }
+ }
+ *pos = j
+ return w
+}
+
+// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
+// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
+// pi and substitution tables for calls to Encrypt. This is used, primarily,
+// by the bcrypt package to reuse the Blowfish key schedule during its
+// set up. It's unlikely that you need to use this directly.
+func ExpandKey(key []byte, c *Cipher) {
+ j := 0
+ for i := 0; i < 18; i++ {
+ // Using inlined getNextWord for performance.
+ var d uint32
+ for k := 0; k < 4; k++ {
+ d = d<<8 | uint32(key[j])
+ j++
+ if j >= len(key) {
+ j = 0
+ }
+ }
+ c.p[i] ^= d
+ }
+
+ var l, r uint32
+ for i := 0; i < 18; i += 2 {
+ l, r = encryptBlock(l, r, c)
+ c.p[i], c.p[i+1] = l, r
+ }
+
+ for i := 0; i < 256; i += 2 {
+ l, r = encryptBlock(l, r, c)
+ c.s0[i], c.s0[i+1] = l, r
+ }
+ for i := 0; i < 256; i += 2 {
+ l, r = encryptBlock(l, r, c)
+ c.s1[i], c.s1[i+1] = l, r
+ }
+ for i := 0; i < 256; i += 2 {
+ l, r = encryptBlock(l, r, c)
+ c.s2[i], c.s2[i+1] = l, r
+ }
+ for i := 0; i < 256; i += 2 {
+ l, r = encryptBlock(l, r, c)
+ c.s3[i], c.s3[i+1] = l, r
+ }
+}
+
+// This is similar to ExpandKey, but folds the salt during the key
+// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero
+// salt passed in, reusing ExpandKey turns out to be a place of inefficiency
+// and specializing it here is useful.
+func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
+ j := 0
+ for i := 0; i < 18; i++ {
+ c.p[i] ^= getNextWord(key, &j)
+ }
+
+ j = 0
+ var l, r uint32
+ for i := 0; i < 18; i += 2 {
+ l ^= getNextWord(salt, &j)
+ r ^= getNextWord(salt, &j)
+ l, r = encryptBlock(l, r, c)
+ c.p[i], c.p[i+1] = l, r
+ }
+
+ for i := 0; i < 256; i += 2 {
+ l ^= getNextWord(salt, &j)
+ r ^= getNextWord(salt, &j)
+ l, r = encryptBlock(l, r, c)
+ c.s0[i], c.s0[i+1] = l, r
+ }
+
+ for i := 0; i < 256; i += 2 {
+ l ^= getNextWord(salt, &j)
+ r ^= getNextWord(salt, &j)
+ l, r = encryptBlock(l, r, c)
+ c.s1[i], c.s1[i+1] = l, r
+ }
+
+ for i := 0; i < 256; i += 2 {
+ l ^= getNextWord(salt, &j)
+ r ^= getNextWord(salt, &j)
+ l, r = encryptBlock(l, r, c)
+ c.s2[i], c.s2[i+1] = l, r
+ }
+
+ for i := 0; i < 256; i += 2 {
+ l ^= getNextWord(salt, &j)
+ r ^= getNextWord(salt, &j)
+ l, r = encryptBlock(l, r, c)
+ c.s3[i], c.s3[i+1] = l, r
+ }
+}
+
+func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
+ xl, xr := l, r
+ xl ^= c.p[0]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16]
+ xr ^= c.p[17]
+ return xr, xl
+}
+
+func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
+ xl, xr := l, r
+ xl ^= c.p[17]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
+ xr ^= c.p[0]
+ return xr, xl
+}
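
For readers tracing the unrolled rounds above: encryptBlock is a 16-round Feistel network in which each P-array entry acts as a round key and the round function mixes one 32-bit half through the four S-boxes. The loop-form sketch below is not part of the vendored file; it assumes the unexported Cipher fields p and s0..s3 declared in cipher.go later in this diff, and it computes the same result as the unrolled encryptBlock.

    // encryptBlockLoop is an illustrative, loop-based equivalent of encryptBlock.
    func encryptBlockLoop(l, r uint32, c *Cipher) (uint32, uint32) {
        // Blowfish round function: combine the four S-boxes on one 32-bit half.
        f := func(x uint32) uint32 {
            return ((c.s0[byte(x>>24)] + c.s1[byte(x>>16)]) ^ c.s2[byte(x>>8)]) + c.s3[byte(x)]
        }
        l ^= c.p[0]
        for i := 1; i <= 16; i += 2 {
            r ^= f(l) ^ c.p[i]
            l ^= f(r) ^ c.p[i+1]
        }
        r ^= c.p[17]
        // The halves are returned swapped, matching the unrolled version.
        return r, l
    }
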
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/cipher.go
new file mode 100644
index 00000000..2641dadd
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/cipher.go
@@ -0,0 +1,91 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
+package blowfish // import "golang.org/x/crypto/blowfish"
+
+// The code is a port of Bruce Schneier's C implementation.
+// See https://www.schneier.com/blowfish.html.
+
+import "strconv"
+
+// The Blowfish block size in bytes.
+const BlockSize = 8
+
+// A Cipher is an instance of Blowfish encryption using a particular key.
+type Cipher struct {
+ p [18]uint32
+ s0, s1, s2, s3 [256]uint32
+}
+
+type KeySizeError int
+
+func (k KeySizeError) Error() string {
+ return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
+}
+
+// NewCipher creates and returns a Cipher.
+// The key argument should be the Blowfish key, from 1 to 56 bytes.
+func NewCipher(key []byte) (*Cipher, error) {
+ var result Cipher
+ if k := len(key); k < 1 || k > 56 {
+ return nil, KeySizeError(k)
+ }
+ initCipher(&result)
+ ExpandKey(key, &result)
+ return &result, nil
+}
+
+// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
+// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
+// sufficient and desirable. For bcrypt compatibility, the key can be over 56
+// bytes.
+func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
+ if len(salt) == 0 {
+ return NewCipher(key)
+ }
+ var result Cipher
+ if k := len(key); k < 1 {
+ return nil, KeySizeError(k)
+ }
+ initCipher(&result)
+ expandKeyWithSalt(key, salt, &result)
+ return &result, nil
+}
+
+// BlockSize returns the Blowfish block size, 8 bytes.
+// It is necessary to satisfy the Block interface in the
+// package "crypto/cipher".
+func (c *Cipher) BlockSize() int { return BlockSize }
+
+// Encrypt encrypts the 8-byte buffer src using the key k
+// and stores the result in dst.
+// Note that for amounts of data larger than a block,
+// it is not safe to just call Encrypt on successive blocks;
+// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
+func (c *Cipher) Encrypt(dst, src []byte) {
+ l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+ r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+ l, r = encryptBlock(l, r, c)
+ dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
+ dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
+}
+
+// Decrypt decrypts the 8-byte buffer src using the key k
+// and stores the result in dst.
+func (c *Cipher) Decrypt(dst, src []byte) {
+ l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+ r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+ l, r = decryptBlock(l, r, c)
+ dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
+ dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
+}
+
+func initCipher(c *Cipher) {
+ copy(c.p[0:], p[0:])
+ copy(c.s0[0:], s0[0:])
+ copy(c.s1[0:], s1[0:])
+ copy(c.s2[0:], s2[0:])
+ copy(c.s3[0:], s3[0:])
+}
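
As a usage note for the exported API defined above (a minimal sketch, separate from the vendored file, using the import path declared in cipher.go): NewCipher accepts a key of 1 to 56 bytes, and Encrypt/Decrypt each operate on exactly one 8-byte block, so longer data should go through a mode from crypto/cipher such as CBC rather than repeated Encrypt calls.

    package main

    import (
        "fmt"

        "golang.org/x/crypto/blowfish"
    )

    func main() {
        key := []byte("example key 1234") // any 1-56 byte key is accepted by NewCipher
        c, err := blowfish.NewCipher(key)
        if err != nil {
            panic(err)
        }

        src := []byte("8bytes!!") // exactly one BlockSize (8-byte) block
        dst := make([]byte, blowfish.BlockSize)
        c.Encrypt(dst, src)

        out := make([]byte, blowfish.BlockSize)
        c.Decrypt(out, dst)
        fmt.Printf("%s\n", out) // round-trips back to "8bytes!!"
    }
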
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/const.go
new file mode 100644
index 00000000..d0407759
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/const.go
@@ -0,0 +1,199 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The startup permutation array and substitution boxes.
+// They are the hexadecimal digits of PI; see:
+// https://www.schneier.com/code/constants.txt.
+
+package blowfish
+
+var s0 = [256]uint32{
+ 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96,
+ 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
+ 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658,
+ 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
+ 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e,
+ 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
+ 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6,
+ 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
+ 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c,
+ 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
+ 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1,
+ 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
+ 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a,
+ 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
+ 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176,
+ 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
+ 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706,
+ 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
+ 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b,
+ 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
+ 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c,
+ 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
+ 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a,
+ 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
+ 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760,
+ 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
+ 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8,
+ 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
+ 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33,
+ 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
+ 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0,
+ 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
+ 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777,
+ 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
+ 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705,
+ 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
+ 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e,
+ 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
+ 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9,
+ 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
+ 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f,
+ 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
+ 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
+}
+
+var s1 = [256]uint32{
+ 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d,
+ 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
+ 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65,
+ 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
+ 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9,
+ 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
+ 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d,
+ 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
+ 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc,
+ 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
+ 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908,
+ 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
+ 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124,
+ 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
+ 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908,
+ 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
+ 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b,
+ 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
+ 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa,
+ 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
+ 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d,
+ 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
+ 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5,
+ 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
+ 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96,
+ 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
+ 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca,
+ 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
+ 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77,
+ 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
+ 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054,
+ 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
+ 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea,
+ 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
+ 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646,
+ 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
+ 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea,
+ 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
+ 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e,
+ 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
+ 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd,
+ 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
+ 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
+}
+
+var s2 = [256]uint32{
+ 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7,
+ 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
+ 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af,
+ 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
+ 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4,
+ 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
+ 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec,
+ 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
+ 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332,
+ 0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
+ 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58,
+ 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
+ 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22,
+ 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
+ 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60,
+ 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
+ 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99,
+ 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
+ 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74,
+ 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
+ 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3,
+ 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
+ 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979,
+ 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
+ 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa,
+ 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
+ 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086,
+ 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
+ 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24,
+ 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
+ 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84,
+ 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
+ 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09,
+ 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
+ 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe,
+ 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
+ 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0,
+ 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
+ 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188,
+ 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
+ 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8,
+ 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
+ 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
+}
+
+var s3 = [256]uint32{
+ 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742,
+ 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
+ 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79,
+ 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
+ 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a,
+ 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
+ 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1,
+ 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
+ 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797,
+ 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
+ 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6,
+ 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
+ 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba,
+ 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
+ 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5,
+ 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
+ 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce,
+ 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
+ 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd,
+ 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
+ 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb,
+ 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
+ 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc,
+ 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
+ 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc,
+ 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
+ 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a,
+ 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
+ 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a,
+ 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
+ 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b,
+ 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
+ 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e,
+ 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
+ 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623,
+ 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
+ 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a,
+ 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
+ 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3,
+ 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
+ 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c,
+ 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
+ 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
+}
+
+var p = [18]uint32{
+ 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0,
+ 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
+ 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b,
+}
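
The arrays above are Blowfish's "nothing up my sleeve" initial state: the fractional hexadecimal digits of pi, which the key schedule copies into the cipher and then repeatedly mixes with the key. A minimal sketch of exercising them through the vendored package, assuming it keeps the upstream golang.org/x/crypto/blowfish API (NewCipher, Encrypt, Decrypt, BlockSize):

package main

import (
	"fmt"

	"golang.org/x/crypto/blowfish" // the vendored copy mirrors this upstream package
)

func main() {
	// NewCipher runs the key schedule: the p and s0..s3 constants above are
	// copied into the cipher state and the key is repeatedly folded into them.
	c, err := blowfish.NewCipher([]byte("an example key"))
	if err != nil {
		panic(err)
	}

	src := []byte("8bytes!!") // Blowfish operates on 8-byte (64-bit) blocks
	dst := make([]byte, blowfish.BlockSize)
	c.Encrypt(dst, src)

	plain := make([]byte, blowfish.BlockSize)
	c.Decrypt(plain, dst)
	fmt.Printf("ciphertext=%x plaintext=%q\n", dst, plain)
}
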
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/LICENSE.txt
new file mode 100644
index 00000000..ead98cf0
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/LICENSE.txt
@@ -0,0 +1,897 @@
+Mattermost Licensing
+
+SOFTWARE LICENSING
+
+You are licensed to use compiled versions of the Mattermost platform produced by Mattermost, Inc. under an MIT LICENSE
+
+- See MIT-COMPILED-LICENSE.md included in compiled versions for details
+
+You may be licensed to use source code to create compiled versions not produced by Mattermost, Inc. in one of two ways:
+
+1. Under the Free Software Foundation’s GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or
+2. Under a commercial license available from Mattermost, Inc. by contacting commercial@mattermost.com
+
+You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/, model/,
+webapp/client, webapp/fonts, webapp/i18n, webapp/images and all subdirectories thereof) under the Apache License v2.0.
+
+We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not
+link to the Mattermost Platform directly, but exclusively uses the Mattermost Admin Tools and Configuration Files, and
+(b) you have not modified, added to or adapted the source code of Mattermost in a way that results in the creation of
+a “modified version” or “work based on” Mattermost as these terms are defined in the AGPL v3.0 license.
+
+MATTERMOST TRADEMARK GUIDELINES
+
+Your use of the mark Mattermost is subject to Mattermost, Inc's prior written approval and our organization’s Trademark
+Standards of Use at http://www.mattermost.org/trademark-standards-of-use/. For trademark approval or any questions
+you have about using these trademarks, please email trademark@mattermost.com
+
+------------------------------------------------------------------------------------------------------------------------------
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+------------------------------------------------------------------------------
+
+The software is released under the terms of the GNU Affero General Public
+License, version 3.
+
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/ber.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/ber.go
new file mode 100644
index 00000000..25cc921b
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/ber.go
@@ -0,0 +1,504 @@
+package ber
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+type Packet struct {
+ Identifier
+ Value interface{}
+ ByteValue []byte
+ Data *bytes.Buffer
+ Children []*Packet
+ Description string
+}
+
+type Identifier struct {
+ ClassType Class
+ TagType Type
+ Tag Tag
+}
+
+type Tag uint64
+
+const (
+ TagEOC Tag = 0x00
+ TagBoolean Tag = 0x01
+ TagInteger Tag = 0x02
+ TagBitString Tag = 0x03
+ TagOctetString Tag = 0x04
+ TagNULL Tag = 0x05
+ TagObjectIdentifier Tag = 0x06
+ TagObjectDescriptor Tag = 0x07
+ TagExternal Tag = 0x08
+ TagRealFloat Tag = 0x09
+ TagEnumerated Tag = 0x0a
+ TagEmbeddedPDV Tag = 0x0b
+ TagUTF8String Tag = 0x0c
+ TagRelativeOID Tag = 0x0d
+ TagSequence Tag = 0x10
+ TagSet Tag = 0x11
+ TagNumericString Tag = 0x12
+ TagPrintableString Tag = 0x13
+ TagT61String Tag = 0x14
+ TagVideotexString Tag = 0x15
+ TagIA5String Tag = 0x16
+ TagUTCTime Tag = 0x17
+ TagGeneralizedTime Tag = 0x18
+ TagGraphicString Tag = 0x19
+ TagVisibleString Tag = 0x1a
+ TagGeneralString Tag = 0x1b
+ TagUniversalString Tag = 0x1c
+ TagCharacterString Tag = 0x1d
+ TagBMPString Tag = 0x1e
+ TagBitmask Tag = 0x1f // xxx11111b
+
+ // HighTag indicates the start of a high-tag byte sequence
+ HighTag Tag = 0x1f // xxx11111b
+ // HighTagContinueBitmask indicates the high-tag byte sequence should continue
+ HighTagContinueBitmask Tag = 0x80 // 10000000b
+ // HighTagValueBitmask obtains the tag value from a high-tag byte sequence byte
+ HighTagValueBitmask Tag = 0x7f // 01111111b
+)
+
+const (
+ // LengthLongFormBitmask is the mask to apply to the length byte to see if a long-form byte sequence is used
+ LengthLongFormBitmask = 0x80
+ // LengthValueBitmask is the mask to apply to the length byte to get the number of bytes in the long-form byte sequence
+ LengthValueBitmask = 0x7f
+
+ // LengthIndefinite is returned from readLength to indicate an indefinite length
+ LengthIndefinite = -1
+)
+
+var tagMap = map[Tag]string{
+ TagEOC: "EOC (End-of-Content)",
+ TagBoolean: "Boolean",
+ TagInteger: "Integer",
+ TagBitString: "Bit String",
+ TagOctetString: "Octet String",
+ TagNULL: "NULL",
+ TagObjectIdentifier: "Object Identifier",
+ TagObjectDescriptor: "Object Descriptor",
+ TagExternal: "External",
+ TagRealFloat: "Real (float)",
+ TagEnumerated: "Enumerated",
+ TagEmbeddedPDV: "Embedded PDV",
+ TagUTF8String: "UTF8 String",
+ TagRelativeOID: "Relative-OID",
+ TagSequence: "Sequence and Sequence of",
+ TagSet: "Set and Set OF",
+ TagNumericString: "Numeric String",
+ TagPrintableString: "Printable String",
+ TagT61String: "T61 String",
+ TagVideotexString: "Videotex String",
+ TagIA5String: "IA5 String",
+ TagUTCTime: "UTC Time",
+ TagGeneralizedTime: "Generalized Time",
+ TagGraphicString: "Graphic String",
+ TagVisibleString: "Visible String",
+ TagGeneralString: "General String",
+ TagUniversalString: "Universal String",
+ TagCharacterString: "Character String",
+ TagBMPString: "BMP String",
+}
+
+type Class uint8
+
+const (
+ ClassUniversal Class = 0 // 00xxxxxxb
+ ClassApplication Class = 64 // 01xxxxxxb
+ ClassContext Class = 128 // 10xxxxxxb
+ ClassPrivate Class = 192 // 11xxxxxxb
+ ClassBitmask Class = 192 // 11xxxxxxb
+)
+
+var ClassMap = map[Class]string{
+ ClassUniversal: "Universal",
+ ClassApplication: "Application",
+ ClassContext: "Context",
+ ClassPrivate: "Private",
+}
+
+type Type uint8
+
+const (
+ TypePrimitive Type = 0 // xx0xxxxxb
+ TypeConstructed Type = 32 // xx1xxxxxb
+ TypeBitmask Type = 32 // xx1xxxxxb
+)
+
+var TypeMap = map[Type]string{
+ TypePrimitive: "Primitive",
+ TypeConstructed: "Constructed",
+}
+
+var Debug bool = false
+
+func PrintBytes(out io.Writer, buf []byte, indent string) {
+ data_lines := make([]string, (len(buf)/30)+1)
+ num_lines := make([]string, (len(buf)/30)+1)
+
+ for i, b := range buf {
+ data_lines[i/30] += fmt.Sprintf("%02x ", b)
+ num_lines[i/30] += fmt.Sprintf("%02d ", (i+1)%100)
+ }
+
+ for i := 0; i < len(data_lines); i++ {
+ out.Write([]byte(indent + data_lines[i] + "\n"))
+ out.Write([]byte(indent + num_lines[i] + "\n\n"))
+ }
+}
+
+func PrintPacket(p *Packet) {
+ printPacket(os.Stdout, p, 0, false)
+}
+
+func printPacket(out io.Writer, p *Packet, indent int, printBytes bool) {
+ indent_str := ""
+
+ for len(indent_str) != indent {
+ indent_str += " "
+ }
+
+ class_str := ClassMap[p.ClassType]
+
+ tagtype_str := TypeMap[p.TagType]
+
+ tag_str := fmt.Sprintf("0x%02X", p.Tag)
+
+ if p.ClassType == ClassUniversal {
+ tag_str = tagMap[p.Tag]
+ }
+
+ value := fmt.Sprint(p.Value)
+ description := ""
+
+ if p.Description != "" {
+ description = p.Description + ": "
+ }
+
+ fmt.Fprintf(out, "%s%s(%s, %s, %s) Len=%d %q\n", indent_str, description, class_str, tagtype_str, tag_str, p.Data.Len(), value)
+
+ if printBytes {
+ PrintBytes(out, p.Bytes(), indent_str)
+ }
+
+ for _, child := range p.Children {
+ printPacket(out, child, indent+1, printBytes)
+ }
+}
+
+// ReadPacket reads a single Packet from the reader
+func ReadPacket(reader io.Reader) (*Packet, error) {
+ p, _, err := readPacket(reader)
+ if err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+func DecodeString(data []byte) string {
+ return string(data)
+}
+
+func parseInt64(bytes []byte) (ret int64, err error) {
+ if len(bytes) > 8 {
+ // We'll overflow an int64 in this case.
+ err = fmt.Errorf("integer too large")
+ return
+ }
+ for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
+ ret <<= 8
+ ret |= int64(bytes[bytesRead])
+ }
+
+ // Shift up and down in order to sign extend the result.
+ ret <<= 64 - uint8(len(bytes))*8
+ ret >>= 64 - uint8(len(bytes))*8
+ return
+}
+
+func encodeInteger(i int64) []byte {
+ n := int64Length(i)
+ out := make([]byte, n)
+
+ var j int
+ for ; n > 0; n-- {
+ out[j] = (byte(i >> uint((n-1)*8)))
+ j++
+ }
+
+ return out
+}
+
+func int64Length(i int64) (numBytes int) {
+ numBytes = 1
+
+ for i > 127 {
+ numBytes++
+ i >>= 8
+ }
+
+ for i < -128 {
+ numBytes++
+ i >>= 8
+ }
+
+ return
+}
+
+// DecodePacket decodes the given bytes into a single Packet
+// If a decode error is encountered, nil is returned.
+func DecodePacket(data []byte) *Packet {
+ p, _, _ := readPacket(bytes.NewBuffer(data))
+
+ return p
+}
+
+// DecodePacketErr decodes the given bytes into a single Packet.
+// If a decode error is encountered, it is returned along with a nil Packet.
+func DecodePacketErr(data []byte) (*Packet, error) {
+ p, _, err := readPacket(bytes.NewBuffer(data))
+ if err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+// readPacket reads a single Packet from the reader, returning the number of bytes read
+func readPacket(reader io.Reader) (*Packet, int, error) {
+ identifier, length, read, err := readHeader(reader)
+ if err != nil {
+ return nil, read, err
+ }
+
+ p := &Packet{
+ Identifier: identifier,
+ }
+
+ p.Data = new(bytes.Buffer)
+ p.Children = make([]*Packet, 0, 2)
+ p.Value = nil
+
+ if p.TagType == TypeConstructed {
+ // TODO: if universal, ensure tag type is allowed to be constructed
+
+ // Track how much content we've read
+ contentRead := 0
+ for {
+ if length != LengthIndefinite {
+ // End if we've read what we've been told to
+ if contentRead == length {
+ break
+ }
+ // Detect if a packet boundary didn't fall on the expected length
+ if contentRead > length {
+ return nil, read, fmt.Errorf("expected to read %d bytes, read %d", length, contentRead)
+ }
+ }
+
+ // Read the next packet
+ child, r, err := readPacket(reader)
+ if err != nil {
+ return nil, read, err
+ }
+ contentRead += r
+ read += r
+
+			// Test if this is the EOC marker for our packet
+ if isEOCPacket(child) {
+ if length == LengthIndefinite {
+ break
+ }
+ return nil, read, errors.New("eoc child not allowed with definite length")
+ }
+
+ // Append and continue
+ p.AppendChild(child)
+ }
+ return p, read, nil
+ }
+
+ if length == LengthIndefinite {
+ return nil, read, errors.New("indefinite length used with primitive type")
+ }
+
+ // Read definite-length content
+ content := make([]byte, length, length)
+ if length > 0 {
+ _, err := io.ReadFull(reader, content)
+ if err != nil {
+ if err == io.EOF {
+ return nil, read, io.ErrUnexpectedEOF
+ }
+ return nil, read, err
+ }
+ read += length
+ }
+
+ if p.ClassType == ClassUniversal {
+ p.Data.Write(content)
+ p.ByteValue = content
+
+ switch p.Tag {
+ case TagEOC:
+ case TagBoolean:
+ val, _ := parseInt64(content)
+
+ p.Value = val != 0
+ case TagInteger:
+ p.Value, _ = parseInt64(content)
+ case TagBitString:
+ case TagOctetString:
+			// The actual string encoding is not known here
+			// (e.g. for LDAP the content is already a UTF-8 encoded
+			// string), so return the data without further processing.
+ p.Value = DecodeString(content)
+ case TagNULL:
+ case TagObjectIdentifier:
+ case TagObjectDescriptor:
+ case TagExternal:
+ case TagRealFloat:
+ case TagEnumerated:
+ p.Value, _ = parseInt64(content)
+ case TagEmbeddedPDV:
+ case TagUTF8String:
+ p.Value = DecodeString(content)
+ case TagRelativeOID:
+ case TagSequence:
+ case TagSet:
+ case TagNumericString:
+ case TagPrintableString:
+ p.Value = DecodeString(content)
+ case TagT61String:
+ case TagVideotexString:
+ case TagIA5String:
+ case TagUTCTime:
+ case TagGeneralizedTime:
+ case TagGraphicString:
+ case TagVisibleString:
+ case TagGeneralString:
+ case TagUniversalString:
+ case TagCharacterString:
+ case TagBMPString:
+ }
+ } else {
+ p.Data.Write(content)
+ }
+
+ return p, read, nil
+}
+
+func (p *Packet) Bytes() []byte {
+ var out bytes.Buffer
+
+ out.Write(encodeIdentifier(p.Identifier))
+ out.Write(encodeLength(p.Data.Len()))
+ out.Write(p.Data.Bytes())
+
+ return out.Bytes()
+}
+
+func (p *Packet) AppendChild(child *Packet) {
+ p.Data.Write(child.Bytes())
+ p.Children = append(p.Children, child)
+}
+
+func Encode(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet {
+ p := new(Packet)
+
+ p.ClassType = ClassType
+ p.TagType = TagType
+ p.Tag = Tag
+ p.Data = new(bytes.Buffer)
+
+ p.Children = make([]*Packet, 0, 2)
+
+ p.Value = Value
+ p.Description = Description
+
+ if Value != nil {
+ v := reflect.ValueOf(Value)
+
+ if ClassType == ClassUniversal {
+ switch Tag {
+ case TagOctetString:
+ sv, ok := v.Interface().(string)
+
+ if ok {
+ p.Data.Write([]byte(sv))
+ }
+ }
+ }
+ }
+
+ return p
+}
+
+func NewSequence(Description string) *Packet {
+ return Encode(ClassUniversal, TypeConstructed, TagSequence, nil, Description)
+}
+
+func NewBoolean(ClassType Class, TagType Type, Tag Tag, Value bool, Description string) *Packet {
+ intValue := int64(0)
+
+ if Value {
+ intValue = 1
+ }
+
+ p := Encode(ClassType, TagType, Tag, nil, Description)
+
+ p.Value = Value
+ p.Data.Write(encodeInteger(intValue))
+
+ return p
+}
+
+func NewInteger(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet {
+ p := Encode(ClassType, TagType, Tag, nil, Description)
+
+ p.Value = Value
+ switch v := Value.(type) {
+ case int:
+ p.Data.Write(encodeInteger(int64(v)))
+ case uint:
+ p.Data.Write(encodeInteger(int64(v)))
+ case int64:
+ p.Data.Write(encodeInteger(v))
+ case uint64:
+ // TODO : check range or add encodeUInt...
+ p.Data.Write(encodeInteger(int64(v)))
+ case int32:
+ p.Data.Write(encodeInteger(int64(v)))
+ case uint32:
+ p.Data.Write(encodeInteger(int64(v)))
+ case int16:
+ p.Data.Write(encodeInteger(int64(v)))
+ case uint16:
+ p.Data.Write(encodeInteger(int64(v)))
+ case int8:
+ p.Data.Write(encodeInteger(int64(v)))
+ case uint8:
+ p.Data.Write(encodeInteger(int64(v)))
+ default:
+ // TODO : add support for big.Int ?
+ panic(fmt.Sprintf("Invalid type %T, expected {u|}int{64|32|16|8}", v))
+ }
+
+ return p
+}
+
+func NewString(ClassType Class, TagType Type, Tag Tag, Value, Description string) *Packet {
+ p := Encode(ClassType, TagType, Tag, nil, Description)
+
+ p.Value = Value
+ p.Data.Write([]byte(Value))
+
+ return p
+}
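
A minimal usage sketch of the ber package vendored above (not part of the upstream file), assuming the gopkg.in/asn1-ber.v1 import path used by this vendor tree; it round-trips a SEQUENCE through Bytes() and DecodePacketErr():

    package main

    import (
        "fmt"

        ber "gopkg.in/asn1-ber.v1"
    )

    func main() {
        // Build SEQUENCE { INTEGER 42, OCTET STRING "hello" }.
        seq := ber.NewSequence("example sequence")
        seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 42, "answer"))
        seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "hello", "greeting"))

        // Serialize to the wire form: identifier, length, then contents.
        encoded := seq.Bytes()

        // Decode it back; DecodePacketErr reports malformed input as an error.
        decoded, err := ber.DecodePacketErr(encoded)
        if err != nil {
            panic(err)
        }
        fmt.Println(decoded.Children[0].Value.(int64))  // 42
        fmt.Println(decoded.Children[1].Value.(string)) // hello
    }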
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/content_int.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/content_int.go
new file mode 100644
index 00000000..1858b74b
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/content_int.go
@@ -0,0 +1,25 @@
+package ber
+
+func encodeUnsignedInteger(i uint64) []byte {
+ n := uint64Length(i)
+ out := make([]byte, n)
+
+ var j int
+ for ; n > 0; n-- {
+ out[j] = (byte(i >> uint((n-1)*8)))
+ j++
+ }
+
+ return out
+}
+
+func uint64Length(i uint64) (numBytes int) {
+ numBytes = 1
+
+ for i > 255 {
+ numBytes++
+ i >>= 8
+ }
+
+ return
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/header.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/header.go
new file mode 100644
index 00000000..123744e9
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/header.go
@@ -0,0 +1,29 @@
+package ber
+
+import (
+ "errors"
+ "io"
+)
+
+func readHeader(reader io.Reader) (identifier Identifier, length int, read int, err error) {
+ if i, c, err := readIdentifier(reader); err != nil {
+ return Identifier{}, 0, read, err
+ } else {
+ identifier = i
+ read += c
+ }
+
+ if l, c, err := readLength(reader); err != nil {
+ return Identifier{}, 0, read, err
+ } else {
+ length = l
+ read += c
+ }
+
+	// Validate length type with identifier (x.690, 8.1.3.2.a)
+ if length == LengthIndefinite && identifier.TagType == TypePrimitive {
+ return Identifier{}, 0, read, errors.New("indefinite length used with primitive type")
+ }
+
+ return identifier, length, read, nil
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/identifier.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/identifier.go
new file mode 100644
index 00000000..f7672a84
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/identifier.go
@@ -0,0 +1,103 @@
+package ber
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+)
+
+func readIdentifier(reader io.Reader) (Identifier, int, error) {
+ identifier := Identifier{}
+ read := 0
+
+ // identifier byte
+ b, err := readByte(reader)
+ if err != nil {
+ if Debug {
+ fmt.Printf("error reading identifier byte: %v\n", err)
+ }
+ return Identifier{}, read, err
+ }
+ read++
+
+ identifier.ClassType = Class(b) & ClassBitmask
+ identifier.TagType = Type(b) & TypeBitmask
+
+ if tag := Tag(b) & TagBitmask; tag != HighTag {
+ // short-form tag
+ identifier.Tag = tag
+ return identifier, read, nil
+ }
+
+ // high-tag-number tag
+ tagBytes := 0
+ for {
+ b, err := readByte(reader)
+ if err != nil {
+ if Debug {
+ fmt.Printf("error reading high-tag-number tag byte %d: %v\n", tagBytes, err)
+ }
+ return Identifier{}, read, err
+ }
+ tagBytes++
+ read++
+
+ // Lowest 7 bits get appended to the tag value (x.690, 8.1.2.4.2.b)
+ identifier.Tag <<= 7
+ identifier.Tag |= Tag(b) & HighTagValueBitmask
+
+ // First byte may not be all zeros (x.690, 8.1.2.4.2.c)
+ if tagBytes == 1 && identifier.Tag == 0 {
+ return Identifier{}, read, errors.New("invalid first high-tag-number tag byte")
+ }
+ // Overflow of int64
+ // TODO: support big int tags?
+ if tagBytes > 9 {
+ return Identifier{}, read, errors.New("high-tag-number tag overflow")
+ }
+
+ // Top bit of 0 means this is the last byte in the high-tag-number tag (x.690, 8.1.2.4.2.a)
+ if Tag(b)&HighTagContinueBitmask == 0 {
+ break
+ }
+ }
+
+ return identifier, read, nil
+}
+
+func encodeIdentifier(identifier Identifier) []byte {
+ b := []byte{0x0}
+ b[0] |= byte(identifier.ClassType)
+ b[0] |= byte(identifier.TagType)
+
+ if identifier.Tag < HighTag {
+ // Short-form
+ b[0] |= byte(identifier.Tag)
+ } else {
+ // high-tag-number
+ b[0] |= byte(HighTag)
+
+ tag := identifier.Tag
+
+ highBit := uint(63)
+ for {
+ if tag&(1<<highBit) != 0 {
+ break
+ }
+ highBit--
+ }
+
+		// The tag occupies highBit+1 bits, emitted in base-128 groups of 7 bits.
+		tagBytes := int(math.Ceil(float64(highBit+1) / 7.0))
+ for i := tagBytes - 1; i >= 0; i-- {
+ offset := uint(i) * 7
+ mask := Tag(0x7f) << offset
+ tagByte := (tag & mask) >> offset
+ if i != 0 {
+ tagByte |= 0x80
+ }
+ b = append(b, byte(tagByte))
+ }
+ }
+ return b
+}
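
A short sketch (not part of the upstream file) of how the single identifier octet decomposes into the class, constructed and tag bits that readIdentifier and encodeIdentifier above work with, using the exported bitmask constants:

    package main

    import (
        "fmt"

        ber "gopkg.in/asn1-ber.v1"
    )

    func main() {
        // A context-specific, constructed packet with tag 3 (a common LDAP CHOICE tag).
        p := ber.Encode(ber.ClassContext, ber.TypeConstructed, ber.Tag(3), nil, "choice")
        first := p.Bytes()[0]

        // 0x80 (context) | 0x20 (constructed) | 0x03 (tag) == 0xa3
        fmt.Printf("%#02x\n", first)
        fmt.Println(ber.Class(first)&ber.ClassBitmask == ber.ClassContext)  // true
        fmt.Println(ber.Type(first)&ber.TypeBitmask == ber.TypeConstructed) // true
        fmt.Println(ber.Tag(first)&ber.TagBitmask == ber.Tag(3))            // true
    }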
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/length.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/length.go
new file mode 100644
index 00000000..750e8f44
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/length.go
@@ -0,0 +1,81 @@
+package ber
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+func readLength(reader io.Reader) (length int, read int, err error) {
+ // length byte
+ b, err := readByte(reader)
+ if err != nil {
+ if Debug {
+ fmt.Printf("error reading length byte: %v\n", err)
+ }
+ return 0, 0, err
+ }
+ read++
+
+ switch {
+ case b == 0xFF:
+		// Invalid 0xFF (x.690, 8.1.3.5.c)
+ return 0, read, errors.New("invalid length byte 0xff")
+
+ case b == LengthLongFormBitmask:
+		// Indefinite form, we have to decode packets until we encounter an EOC packet (x.690, 8.1.3.6)
+ length = LengthIndefinite
+
+ case b&LengthLongFormBitmask == 0:
+		// Short definite form, extract the length from the bottom 7 bits (x.690, 8.1.3.4)
+ length = int(b) & LengthValueBitmask
+
+ case b&LengthLongFormBitmask != 0:
+		// Long definite form, extract the number of length bytes to follow from the bottom 7 bits (x.690, 8.1.3.5.b)
+ lengthBytes := int(b) & LengthValueBitmask
+ // Protect against overflow
+ // TODO: support big int length?
+ if lengthBytes > 8 {
+ return 0, read, errors.New("long-form length overflow")
+ }
+
+ // Accumulate into a 64-bit variable
+ var length64 int64
+ for i := 0; i < lengthBytes; i++ {
+ b, err = readByte(reader)
+ if err != nil {
+ if Debug {
+ fmt.Printf("error reading long-form length byte %d: %v\n", i, err)
+ }
+ return 0, read, err
+ }
+ read++
+
+			// x.690, 8.1.3.5
+ length64 <<= 8
+ length64 |= int64(b)
+ }
+
+ // Cast to a platform-specific integer
+ length = int(length64)
+ // Ensure we didn't overflow
+ if int64(length) != length64 {
+ return 0, read, errors.New("long-form length overflow")
+ }
+
+ default:
+ return 0, read, errors.New("invalid length byte")
+ }
+
+ return length, read, nil
+}
+
+func encodeLength(length int) []byte {
+ length_bytes := encodeUnsignedInteger(uint64(length))
+ if length > 127 || len(length_bytes) > 1 {
+ longFormBytes := []byte{(LengthLongFormBitmask | byte(len(length_bytes)))}
+ longFormBytes = append(longFormBytes, length_bytes...)
+ length_bytes = longFormBytes
+ }
+ return length_bytes
+}
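
A short sketch (not part of the upstream file) of the two definite-length encodings produced by encodeLength and consumed by readLength above: contents of up to 127 bytes use a single short-form byte, anything longer switches to the long form (0x80 | number-of-length-bytes, followed by the big-endian length):

    package main

    import (
        "bytes"
        "fmt"

        ber "gopkg.in/asn1-ber.v1"
    )

    func main() {
        long := ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString,
            string(bytes.Repeat([]byte{'a'}, 300)), "long value")
        // Identifier 0x04, then 0x82 0x01 0x2c: long form, two length bytes, value 300.
        fmt.Printf("% x\n", long.Bytes()[:4])

        short := ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "hi", "short value")
        // Identifier 0x04, then the short-form length byte 0x02.
        fmt.Printf("% x\n", short.Bytes()[:2])
    }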
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/util.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/util.go
new file mode 100644
index 00000000..3e56b66c
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/util.go
@@ -0,0 +1,24 @@
+package ber
+
+import "io"
+
+func readByte(reader io.Reader) (byte, error) {
+ bytes := make([]byte, 1, 1)
+ _, err := io.ReadFull(reader, bytes)
+ if err != nil {
+ if err == io.EOF {
+ return 0, io.ErrUnexpectedEOF
+ }
+ return 0, err
+ }
+ return bytes[0], nil
+}
+
+func isEOCPacket(p *Packet) bool {
+ return p != nil &&
+ p.Tag == TagEOC &&
+ p.ClassType == ClassUniversal &&
+ p.TagType == TypePrimitive &&
+ len(p.ByteValue) == 0 &&
+ len(p.Children) == 0
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/LICENSE.txt
new file mode 100644
index 00000000..ead98cf0
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/LICENSE.txt
@@ -0,0 +1,897 @@
+Mattermost Licensing
+
+SOFTWARE LICENSING
+
+You are licensed to use compiled versions of the Mattermost platform produced by Mattermost, Inc. under an MIT LICENSE
+
+- See MIT-COMPILED-LICENSE.md included in compiled versions for details
+
+You may be licensed to use source code to create compiled versions not produced by Mattermost, Inc. in one of two ways:
+
+1. Under the Free Software Foundation’s GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or
+2. Under a commercial license available from Mattermost, Inc. by contacting commercial@mattermost.com
+
+You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/, model/,
+webapp/client, webapp/fonts, webapp/i18n, webapp/images and all subdirectories thereof) under the Apache License v2.0.
+
+We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not
+link to the Mattermost Platform directly, but exclusively uses the Mattermost Admin Tools and Configuration Files, and
+(b) you have not modified, added to or adapted the source code of Mattermost in a way that results in the creation of
+a “modified version” or “work based on” Mattermost as these terms are defined in the AGPL v3.0 license.
+
+MATTERMOST TRADEMARK GUIDELINES
+
+Your use of the mark Mattermost is subject to Mattermost, Inc's prior written approval and our organization’s Trademark
+Standards of Use at http://www.mattermost.org/trademark-standards-of-use/. For trademark approval or any questions
+you have about using these trademarks, please email trademark@mattermost.com
+
+------------------------------------------------------------------------------------------------------------------------------
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+------------------------------------------------------------------------------
+
+The software is released under the terms of the GNU Affero General Public
+License, version 3.
+
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/apic.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/apic.go
new file mode 100644
index 00000000..95ec014e
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/apic.go
@@ -0,0 +1,742 @@
+package yaml
+
+import (
+ "io"
+ "os"
+)
+
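+// yaml_insert_token appends or inserts a token into the parser's token queue,
+// first compacting the queue to the start of the backing array when it is full.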
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// File read handler.
+func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_file.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_file_read_handler
+ parser.input_file = file
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ }
+ return true
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// File write handler.
+func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_file.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_file_write_handler
+ emitter.output_file = file
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+ return true
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+ return true
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t, implicit bool) bool {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+ return true
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+// mark yaml_mark_t = { 0, 0, 0 }
+// anchor_copy *yaml_char_t = NULL
+//
+// assert(event) // Non-NULL event object is expected.
+// assert(anchor) // Non-NULL anchor is expected.
+//
+// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+// anchor_copy = yaml_strdup(anchor)
+// if (!anchor_copy)
+// return 0
+//
+// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+// return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+ return true
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+//	context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/decode.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/decode.go
new file mode 100644
index 00000000..e85eb2e3
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/decode.go
@@ -0,0 +1,685 @@
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+const (
+ documentNode = 1 << iota
+ mappingNode
+ sequenceNode
+ scalarNode
+ aliasNode
+)
+
+type node struct {
+ kind int
+ line, column int
+ tag string
+ value string
+ implicit bool
+ children []*node
+ anchors map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *node
+}
+
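+// newParser creates a parser over the given document bytes and consumes the
+// initial STREAM-START event.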
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+ }
+
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+
+ yaml_parser_set_input_string(&p.parser, b)
+
+ p.skip()
+ if p.event.typ != yaml_STREAM_START_EVENT {
+ panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
+ }
+ p.skip()
+ return &p
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
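+// skip discards the current event and advances the parser to the next one,
+// failing if the end of the stream has already been reached.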
+func (p *parser) skip() {
+ if p.event.typ != yaml_NO_EVENT {
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ yaml_event_delete(&p.event)
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+}
+
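+// fail reports the parser's current problem, including the line number when a
+// problem or context mark is available.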
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ } else if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
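+// anchor records the node under its anchor name in the current document.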
+func (p *parser) anchor(n *node, anchor []byte) {
+ if anchor != nil {
+ p.doc.anchors[string(anchor)] = n
+ }
+}
+
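+// parse builds and returns the node for the current event, or nil at the end
+// of the stream.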
+func (p *parser) parse() *node {
+ switch p.event.typ {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ default:
+ panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
+ }
+}
+
+func (p *parser) node(kind int) *node {
+ return &node{
+ kind: kind,
+ line: p.event.start_mark.line,
+ column: p.event.start_mark.column,
+ }
+}
+
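+// document parses a complete document, recording anchors on the document node.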
+func (p *parser) document() *node {
+ n := p.node(documentNode)
+ n.anchors = make(map[string]*node)
+ p.doc = n
+ p.skip()
+ n.children = append(n.children, p.parse())
+ if p.event.typ != yaml_DOCUMENT_END_EVENT {
+ panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
+ }
+ p.skip()
+ return n
+}
+
+func (p *parser) alias() *node {
+ n := p.node(aliasNode)
+ n.value = string(p.event.anchor)
+ p.skip()
+ return n
+}
+
+func (p *parser) scalar() *node {
+ n := p.node(scalarNode)
+ n.value = string(p.event.value)
+ n.tag = string(p.event.tag)
+ n.implicit = p.event.implicit
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ return n
+}
+
+func (p *parser) sequence() *node {
+ n := p.node(sequenceNode)
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ for p.event.typ != yaml_SEQUENCE_END_EVENT {
+ n.children = append(n.children, p.parse())
+ }
+ p.skip()
+ return n
+}
+
+func (p *parser) mapping() *node {
+ n := p.node(mappingNode)
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ for p.event.typ != yaml_MAPPING_END_EVENT {
+ n.children = append(n.children, p.parse(), p.parse())
+ }
+ p.skip()
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *node
+ aliases map[string]bool
+ mapType reflect.Type
+ terrors []string
+ strict bool
+}
+
+var (
+ mapItemType = reflect.TypeOf(MapItem{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = defaultMapType.Elem()
+)
+
+func newDecoder(strict bool) *decoder {
+ d := &decoder{mapType: defaultMapType, strict: strict}
+ d.aliases = make(map[string]bool)
+ return d
+}
+
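+// terror records a type conversion error for the node against the target type.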
+func (d *decoder) terror(n *node, tag string, out reflect.Value) {
+ if n.tag != "" {
+ tag = n.tag
+ }
+ value := n.value
+ if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
+}
+
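+// callUnmarshaler hands the node to a custom Unmarshaler; type errors raised
+// while decoding are surfaced as a TypeError and merged into the decoder's
+// error list.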
+func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// its types unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ if u, ok := out.Addr().Interface().(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
+
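+// unmarshal decodes the node into out, handling documents, aliases, and custom
+// unmarshalers before dispatching on the node kind.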
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+ switch n.kind {
+ case documentNode:
+ return d.document(n, out)
+ case aliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.kind {
+ case scalarNode:
+ good = d.scalar(n, out)
+ case mappingNode:
+ good = d.mapping(n, out)
+ case sequenceNode:
+ good = d.sequence(n, out)
+ default:
+ panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
+ }
+ return good
+}
+
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+ if len(n.children) == 1 {
+ d.doc = n
+ d.unmarshal(n.children[0], out)
+ return true
+ }
+ return false
+}
+
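+// alias resolves an alias node against the document's anchors and guards
+// against anchors that contain themselves.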
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+ an, ok := d.doc.anchors[n.value]
+ if !ok {
+ failf("unknown anchor '%s' referenced", n.value)
+ }
+ if d.aliases[n.value] {
+ failf("anchor '%s' value contains itself", n.value)
+ }
+ d.aliases[n.value] = true
+ good = d.unmarshal(an, out)
+ delete(d.aliases, n.value)
+ return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
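+// scalar decodes a scalar node into out, resolving its tag and converting the
+// value to the destination kind.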
+func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
+ var tag string
+ var resolved interface{}
+ if n.tag == "" && !n.implicit {
+ tag = yaml_STR_TAG
+ resolved = n.value
+ } else {
+ tag, resolved = resolve(n.tag, n.value)
+ if tag == yaml_BINARY_TAG {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ if out.Kind() == reflect.Map && !out.CanAddr() {
+ resetMap(out)
+ } else {
+ out.Set(reflect.Zero(out.Type()))
+ }
+ return true
+ }
+ if s, ok := resolved.(string); ok && out.CanAddr() {
+ if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
+ err := u.UnmarshalText([]byte(s))
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == yaml_BINARY_TAG {
+ out.SetString(resolved.(string))
+ good = true
+ } else if resolved != nil {
+ out.SetString(n.value)
+ good = true
+ }
+ case reflect.Interface:
+ if resolved == nil {
+ out.Set(reflect.Zero(out.Type()))
+ } else {
+ out.Set(reflect.ValueOf(resolved))
+ }
+ good = true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch resolved := resolved.(type) {
+ case int:
+ if !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case int64:
+ if !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ good = true
+ }
+ case uint64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case float64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ good = true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case uint64:
+ if !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ good = true
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ good = true
+ case int64:
+ out.SetFloat(float64(resolved))
+ good = true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ good = true
+ case float64:
+ out.SetFloat(resolved)
+ good = true
+ }
+ case reflect.Ptr:
+ if out.Type().Elem() == reflect.TypeOf(resolved) {
+			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+ elem := reflect.New(out.Type().Elem())
+ elem.Elem().Set(reflect.ValueOf(resolved))
+ out.Set(elem)
+ good = true
+ }
+ }
+ if !good {
+ d.terror(n, tag, out)
+ }
+ return good
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
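+// sequence decodes a sequence node into a slice, or into a generic
+// []interface{} when out is an interface value.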
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+ l := len(n.children)
+
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, l))
+ default:
+ d.terror(n, yaml_SEQ_TAG, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ j := 0
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.children[i], e); ok {
+ out.Index(j).Set(e)
+ j++
+ }
+ }
+ out.Set(out.Slice(0, j))
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
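+// mapping decodes a mapping node into a map, struct, or []MapItem depending on
+// the destination type.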
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Slice:
+ return d.mappingSlice(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ if d.mapType.Kind() == reflect.Map {
+ iface := out
+ out = reflect.MakeMap(d.mapType)
+ iface.Set(out)
+ } else {
+ slicev := reflect.New(d.mapType).Elem()
+ if !d.mappingSlice(n, slicev) {
+ return false
+ }
+ out.Set(slicev)
+ return true
+ }
+ default:
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ mapType := d.mapType
+ if outt.Key() == ifaceType && outt.Elem() == ifaceType {
+ d.mapType = outt
+ }
+
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ }
+ l := len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.children[i], k) {
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.children[i+1], e) {
+ out.SetMapIndex(k, e)
+ }
+ }
+ }
+ d.mapType = mapType
+ return true
+}
+
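+// mappingSlice decodes a mapping node into a slice of MapItem, preserving key
+// order.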
+func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
+ outt := out.Type()
+ if outt.Elem() != mapItemType {
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+
+ mapType := d.mapType
+ d.mapType = outt
+
+ var slice []MapItem
+ var l = len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ item := MapItem{}
+ k := reflect.ValueOf(&item.Key).Elem()
+ if d.unmarshal(n.children[i], k) {
+ v := reflect.ValueOf(&item.Value).Elem()
+ if d.unmarshal(n.children[i+1], v) {
+ slice = append(slice, item)
+ }
+ }
+ }
+ out.Set(reflect.ValueOf(slice))
+ d.mapType = mapType
+ return true
+}
+
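+// mappingStruct decodes a mapping node into a struct using its cached field
+// information, honoring inline maps and strict mode.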
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+ name := settableValueOf("")
+ l := len(n.children)
+
+ var inlineMap reflect.Value
+ var elemType reflect.Type
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+ elemType = inlineMap.Type().Elem()
+ }
+
+ for i := 0; i < l; i += 2 {
+ ni := n.children[i]
+ if isMerge(ni) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = out.FieldByIndex(info.Inline)
+ }
+ d.unmarshal(n.children[i+1], field)
+ } else if sinfo.InlineMap != -1 {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ value := reflect.New(elemType).Elem()
+ d.unmarshal(n.children[i+1], value)
+ inlineMap.SetMapIndex(name, value)
+ } else if d.strict {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in struct %s", ni.line+1, name.String(), out.Type()))
+ }
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
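+// merge applies a "<<" merge key by unmarshalling a mapping, alias, or
+// sequence of mappings into out.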
+func (d *decoder) merge(n *node, out reflect.Value) {
+ switch n.kind {
+ case mappingNode:
+ d.unmarshal(n, out)
+ case aliasNode:
+ an, ok := d.doc.anchors[n.value]
+ if ok && an.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(n, out)
+ case sequenceNode:
+ // Step backwards as earlier nodes take precedence.
+ for i := len(n.children) - 1; i >= 0; i-- {
+ ni := n.children[i]
+ if ni.kind == aliasNode {
+ an, ok := d.doc.anchors[ni.value]
+ if ok && an.kind != mappingNode {
+ failWantMap()
+ }
+ } else if ni.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+}
+
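+// isMerge reports whether the node is a "<<" merge key.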
+func isMerge(n *node) bool {
+ return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/emitterc.go
new file mode 100644
index 00000000..dcaf502f
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/emitterc.go
@@ -0,0 +1,1684 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ emitter.column = 0
+ emitter.line++
+ return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ emitter.column = 0
+ emitter.line++
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_DOCUMENT_START_EVENT:
+ accumulate = 1
+ break
+ case yaml_SEQUENCE_START_EVENT:
+ accumulate = 2
+ break
+ case yaml_MAPPING_START_EVENT:
+ accumulate = 3
+ break
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ emitter.indent += emitter.best_indent
+ }
+ return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+ return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
+ }
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+ // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ emitter.indention = true
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ emitter.whitespace = false
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/encode.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/encode.go
new file mode 100644
index 00000000..84f84995
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/encode.go
@@ -0,0 +1,306 @@
+package yaml
+
+import (
+ "encoding"
+ "fmt"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+}
+
+func newEncoder() (e *encoder) {
+ e = &encoder{}
+ e.must(yaml_emitter_initialize(&e.emitter))
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
+ e.emit()
+ e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
+ e.emit()
+ return e
+}
+
+func (e *encoder) finish() {
+ e.must(yaml_document_end_event_initialize(&e.event, true))
+ e.emit()
+ e.emitter.open_ended = false
+ e.must(yaml_stream_end_event_initialize(&e.event))
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
+ e.must(false)
+ }
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ if !in.IsValid() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ if m, ok := iface.(Marshaler); ok {
+ v, err := m.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ in = reflect.ValueOf(v)
+ } else if m, ok := iface.(encoding.TextMarshaler); ok {
+ text, err := m.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ if in.IsNil() {
+ e.nilv()
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ if in.IsNil() {
+ e.nilv()
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Struct:
+ e.structv(tag, in)
+ case reflect.Slice:
+ if in.Type().Elem() == mapItemType {
+ e.itemsv(tag, in)
+ } else {
+ e.slicev(tag, in)
+ }
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if in.Type() == durationType {
+ e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
+ } else {
+ e.intv(tag, in)
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) itemsv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
+ for _, item := range slice {
+ e.marshal("", reflect.ValueOf(item.Key))
+ e.marshal("", reflect.ValueOf(item.Value))
+ }
+ })
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = in.FieldByIndex(info.Inline)
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ if sinfo.InlineMap >= 0 {
+ m := in.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ e.flow = false
+ keys := keyList(m.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ if _, found := sinfo.FieldsMap[k.String()]; found {
+ panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
+ }
+ e.marshal("", k)
+ e.flow = false
+ e.marshal("", m.MapIndex(k))
+ }
+ }
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ f()
+ e.must(yaml_mapping_end_event_initialize(&e.event))
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ rtag, rs := resolve("", s)
+ if rtag == yaml_BINARY_TAG {
+ if tag == "" || tag == yaml_STR_TAG {
+ tag = rtag
+ s = rs.(string)
+ } else if tag == yaml_BINARY_TAG {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ } else {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ }
+ if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ } else if strings.Contains(s, "\n") {
+ style = yaml_LITERAL_SCALAR_STYLE
+ } else {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // FIXME: Handle 64 bits here.
+ s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+ implicit := tag == ""
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.emit()
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/parserc.go
new file mode 100644
index 00000000..81d05dfe
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/parserc.go
@@ -0,0 +1,1095 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ return &parser.tokens[parser.tokens_head]
+ }
+ return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected <document start>", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ return true
+ }
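+ // A node that carries only an anchor and/or a tag with no following
+ // content is reported as an empty plain scalar.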
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
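+ // A KEY token inside a flow sequence starts a single-pair mapping entry
+ // (e.g. "[a: b]"), reported as an implicit flow mapping.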
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
+
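+// Tag handles that are always defined: "!" for local tags and "!!" for the
+// standard tag:yaml.org,2002: namespace.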
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
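+ // Only YAML 1.1 is accepted; any other %YAML version is rejected.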
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
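+
+// For reference, an implicit document such as:
+//
+// key: value
+//
+// is reported by this parser as the event sequence:
+//
+// STREAM-START
+// DOCUMENT-START (implicit)
+// MAPPING-START (block)
+// SCALAR("key")
+// SCALAR("value")
+// MAPPING-END
+// DOCUMENT-END (implicit)
+// STREAM-END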
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/readerc.go
new file mode 100644
index 00000000..f4507917
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/readerc.go
@@ -0,0 +1,394 @@
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we have enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
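+ // No BOM was found; default to UTF-8 as documented above.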
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
+
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ low, high = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ buffer_len += 1
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ buffer_len += 2
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ buffer_len += 3
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ buffer_len += 4
+ }
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/resolve.go
new file mode 100644
index 00000000..232313cc
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/resolve.go
@@ -0,0 +1,208 @@
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+ {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+ {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+ {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+ {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+ {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+ {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", yaml_MERGE_TAG, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ // TODO This can easily be made faster and produce less garbage.
+ if strings.HasPrefix(tag, longTagPrefix) {
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
+ return true
+ }
+ return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
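+ // If an explicit tag was given and it does not match the resolved tag
+ // (and is not !!str or !!binary, which accept anything), fail on return.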
+ defer func() {
+ switch tag {
+ case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+ return
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+ // the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
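+ // YAML 1.1 allows '_' as a separator inside numbers; strip it before parsing.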
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ if yamlStyleFloat.MatchString(plain) {
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt(plain[3:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, -int(intv)
+ } else {
+ return yaml_INT_TAG, -intv
+ }
+ }
+ }
+ // XXX Handle timestamps here.
+
+ default:
+ panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ if tag == yaml_BINARY_TAG {
+ return yaml_BINARY_TAG, in
+ }
+ if utf8.ValidString(in) {
+ return yaml_STR_TAG, in
+ }
+ return yaml_BINARY_TAG, encodeBase64(in)
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
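+ // Copy the encoded bytes in lineLen-sized chunks, appending a newline
+ // after each chunk whenever the output spans more than one line.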
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 00000000..07448445
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2711 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive descent parser (or
+// LL(1) parser, as it is usually called).
+//
+// Actually, only two aspects of Scanning might be called "clever"; the
+// rest is quite straightforward. These aspects are "block collection start"
+// and "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tag, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrate the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes an indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require starting a new block collection on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
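+// trace prints its arguments prefixed with "+++" and returns a closure that
+// prints them again prefixed with "---"; meant to be used as
+// "defer trace(args...)()" while debugging.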
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ // Check if we really need to fetch more tokens.
+ need_more_tokens := false
+
+ if parser.tokens_head == len(parser.tokens) {
+ // Queue is empty.
+ need_more_tokens = true
+ } else {
+ // Check if any potential simple key may occupy the head position.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+ if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+ need_more_tokens = true
+ break
+ }
+ }
+ }
+
+ // We are finished.
+ if !need_more_tokens {
+ break
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // Remove obsolete potential simple keys.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
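+ // Cache the buffer and current position for the multi-byte document
+ // indicator checks below.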
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank characters except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+	// If we haven't determined the token type so far, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
+// Check the list of potential simple keys and remove the positions that
+// cannot contain simple keys anymore.
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+ // Check for a potential simple key for each flow level.
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+
+ // The specification requires that a simple key
+ //
+ // - is limited to a single line,
+ // - is shorter than 1024 characters.
+ if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ }
+ }
+ return true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ // A simple key is required only when it is the first token in the current
+ // line. Therefore it is always allowed. But we add a check anyway.
+ if required && !parser.simple_key_allowed {
+ panic("should not happen")
+ }
+
+ //
+ // If the current position may start a simple key, save it.
+ //
+ if parser.simple_key_allowed {
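+		// Record the queue position this key's KEY token would occupy:
+		// tokens already handed to the parser plus tokens still queued.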
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ }
+ simple_key.mark = parser.mark
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // Increase the flow level.
+ parser.flow_level++
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+ }
+ return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
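+		// A non-negative 'number' is an absolute token index; convert it to a
+		// position relative to the head of the queue (a negative value appends).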
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it because the Parser
+ // is able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if simple_key.possible {
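+		// The key text itself has already been scanned as a scalar; the KEY
+		// token is inserted back at the position recorded when the key was saved.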
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+	// A flow scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+	// A simple key cannot follow a plain scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+	// Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+	// Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^
+// %YAML 1.1 # a comment \n
+// ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+	// Repeat while the next character is a digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ /*
+ * Check if length of the anchor is greater than 0 and it is followed by
+ * a whitespace character or one of the indicators:
+ *
+ * '?', ':', ',', ']', '}', '%', '@', '`'.
+ */
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+/*
+ * Scan a TAG token.
+ */
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+		// Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+		// directive, it's an error. If it's a tag token, it must be a part of the URI.
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+ hasTag := len(head) > 0
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ hasTag = true
+ }
+
+ if !hasTag {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters.
+ w := 1024
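+	// w starts at a sentinel value; the first escaped octet fixes it to the
+	// width of the UTF-8 sequence, and it then counts down the octets left.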
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
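+	// chomping: '-' (-1) strips the final line break, '+' (+1) keeps all
+	// trailing breaks, and the default (0) clips to a single trailing break.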
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
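+	// Track the deepest column reached so that, when no explicit indentation
+	// indicator was given, the level can be inferred from the leading lines.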
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+		// Check for a tab character messing up the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexdecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
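+					// Encode the code point as UTF-8 by hand (one to four bytes).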
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
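+	// In the block context the scalar must stay indented past the enclosing
+	// block level; dropping below this column ends the scalar.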
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
+ if parser.flow_level > 0 &&
+ parser.buffer[parser.buffer_pos] == ':' &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found unexpected ':'")
+ return false
+ }
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for a tab character that abuses the indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violate indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/sorter.go
new file mode 100644
index 00000000..5958822f
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/sorter.go
@@ -0,0 +1,104 @@
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
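+
+// Less orders keys with numbers and bools compared numerically, kinds
+// compared by kind when they differ, and strings compared rune by rune with
+// embedded digit runs compared by numeric value (a natural sort).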
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ return bl
+ }
+ var ai, bi int
+ var an, bn int64
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number/bool,
+// and reports whether it is a number/bool.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must necessarily have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
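
A short sketch of the ordering this comparator is intended to produce, assuming (as in upstream yaml.v2) that the encoder sorts map keys with keyList; the keys below are illustrative:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Digit runs inside keys compare numerically, so "item2" sorts
	// before "item10" instead of after it.
	out, err := yaml.Marshal(map[string]int{"item10": 10, "item2": 2})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// item2: 2
	// item10: 10
}
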
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/writerc.go
new file mode 100644
index 00000000..190362f2
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/writerc.go
@@ -0,0 +1,89 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ // If the output encoding is UTF-8, we don't need to recode the buffer.
+ if emitter.encoding == yaml_UTF8_ENCODING {
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+ }
+
+ // Recode the buffer into the raw buffer.
+ var low, high int
+ if emitter.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ high, low = 1, 0
+ }
+
+ pos := 0
+ for pos < emitter.buffer_pos {
+ // See the "reader.c" code for more details on UTF-8 encoding. Note
+ // that we assume that the buffer contains a valid UTF-8 sequence.
+
+ // Read the next UTF-8 character.
+ octet := emitter.buffer[pos]
+
+ var w int
+ var value rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, value = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, value = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, value = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, value = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = emitter.buffer[pos+k]
+ value = (value << 6) + (rune(octet) & 0x3F)
+ }
+ pos += w
+
+ // Write the character.
+ if value < 0x10000 {
+ var b [2]byte
+ b[high] = byte(value >> 8)
+ b[low] = byte(value & 0xFF)
+ emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
+ } else {
+ // Write the character using a surrogate pair (check "reader.c").
+ var b [4]byte
+ value -= 0x10000
+ b[high] = byte(0xD8 + (value >> 18))
+ b[low] = byte((value >> 10) & 0xFF)
+ b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
+ b[low+2] = byte(value & 0xFF)
+ emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
+ }
+ }
+
+ // Write the raw buffer.
+ if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ emitter.raw_buffer = emitter.raw_buffer[:0]
+ return true
+}
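
For reference, the surrogate-pair arithmetic in the UTF-16 branch above corresponds to the standard UTF-16 encoding; a sketch using the standard library, independent of the emitter's internal buffers:

package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	// Code points above 0xFFFF are written as a high/low surrogate pair.
	units := utf16.Encode([]rune{0x1F600}) // U+1F600 lies outside the BMP
	fmt.Printf("%#04x\n", units)           // [0xd83d 0xde00]
}
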
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 00000000..5e3c2dae
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,357 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+ Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatch, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members will result in
+// an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, true)
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+ defer handleErr(&err)
+ d := newDecoder(strict)
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Does not apply to zero valued structs.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshal("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+					return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct:
+ sinfo, err := getStructInfo(field.Type)
+ if err != nil {
+ return nil, err
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ default:
+ //return nil, errors.New("Option ,inline needs a struct value or map field")
+ return nil, errors.New("Option ,inline needs a struct value field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
+
+func isZero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ for i := v.NumField() - 1; i >= 0; i-- {
+ if vt.Field(i).PkgPath != "" {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
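
A brief usage sketch tying the Marshal and Unmarshal doc comments together; the struct and field names are illustrative, but the tag flags (omitempty, flow, inline) are the ones documented above:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type Config struct {
	Name  string            `yaml:"name"`
	Port  int               `yaml:"port,omitempty"` // omitted when zero
	Tags  []string          `yaml:"tags,flow"`      // emitted in flow style: [a, b]
	Extra map[string]string `yaml:",inline"`        // keys with no matching field land here
}

func main() {
	in := []byte("name: server\ntags: [a, b]\nregion: eu\n")

	var c Config
	if err := yaml.Unmarshal(in, &c); err != nil {
		panic(err)
	}

	out, err := yaml.Marshal(&c)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// name: server
	// tags: [a, b]
	// region: eu
}
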
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yamlh.go
new file mode 100644
index 00000000..3caeca04
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -0,0 +1,716 @@
+package yaml
+
+import (
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+ yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+)
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for YAML_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write not more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out] data A pointer to an application data specified by
+// yaml_parser_set_input().
+// [out] buffer The buffer to write the data from the source.
+// [in] size The size of the buffer.
+// [out] size_read The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+ // The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_file io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write @a size bytes of the
+// @a buffer to the output.
+//
+// @param[in,out] data A pointer to an application data specified by
+// yaml_emitter_set_output().
+// @param[in] buffer The buffer with bytes to be written.
+// @param[in] size The size of the buffer.
+//
+// @returns On success, the handler should return @c 1. If the handler failed,
+// the returned value should be @c 0.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_file io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // If the output is in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // If the last character was a whitespace?
+ indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+ open_ended bool // If an explicit document end is required?
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Dumper stuff
+
+ opened bool // If the stream was already opened?
+ closed bool // If the stream was already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // If the node has been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
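
The read and write handler types above are internal to the package; minimal sketches of handlers matching their contracts (they would have to live inside package yaml, and the names are illustrative):

// Reads from an in-memory source; returns io.EOF once the input is exhausted.
func byteSliceReadHandler(src []byte) yaml_read_handler_t {
	pos := 0
	return func(parser *yaml_parser_t, buffer []byte) (n int, err error) {
		if pos >= len(src) {
			return 0, io.EOF
		}
		n = copy(buffer, src[pos:])
		pos += n
		return n, nil
	}
}

// Collects emitter output in memory by appending each flushed chunk.
func appendWriteHandler(out *[]byte) yaml_write_handler_t {
	return func(emitter *yaml_emitter_t, buffer []byte) error {
		*out = append(*out, buffer...)
		return nil
	}
}
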
diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 00000000..8110ce3c
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return ( // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return ( // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return ( // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+ return 0
+
+}
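
A quick, package-internal illustration of the classification helpers and width above (the sample buffer is illustrative):

// Exercises the predicates on a small UTF-8 buffer.
func exampleClassify() {
	b := []byte("a\t\né") // 'é' encodes as the two bytes 0xC3 0xA9
	_ = is_alpha(b, 0)    // true: 'a'
	_ = is_tab(b, 1)      // true: '\t'
	_ = is_break(b, 2)    // true: '\n'
	_ = width(b[3])       // 2: 0xC3 is the lead byte of a two-byte sequence
}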