author    Wim <wim@42.be>  2020-10-19 23:40:00 +0200
committer GitHub <noreply@github.com>  2020-10-19 23:40:00 +0200
commit    075a84427f6332aab707d283ad770d69f8816032 (patch)
tree      0ff9f56a057919f3fe968e57f6f0b1c0d1f85078 /vendor/github.com/mattermost
parent    950f2759bd2b20aa0bdedc3dc9a74d0dafb606d8 (diff)
Update vendor (#1265)
Diffstat (limited to 'vendor/github.com/mattermost')
-rw-r--r--  vendor/github.com/mattermost/logr/.gitignore | 36
-rw-r--r--  vendor/github.com/mattermost/logr/.travis.yml | 4
-rw-r--r--  vendor/github.com/mattermost/logr/LICENSE | 21
-rw-r--r--  vendor/github.com/mattermost/logr/README.md | 193
-rw-r--r--  vendor/github.com/mattermost/logr/config.go | 11
-rw-r--r--  vendor/github.com/mattermost/logr/const.go | 34
-rw-r--r--  vendor/github.com/mattermost/logr/filter.go | 26
-rw-r--r--  vendor/github.com/mattermost/logr/format/json.go | 273
-rw-r--r--  vendor/github.com/mattermost/logr/format/plain.go | 75
-rw-r--r--  vendor/github.com/mattermost/logr/formatter.go | 119
-rw-r--r--  vendor/github.com/mattermost/logr/go.mod | 11
-rw-r--r--  vendor/github.com/mattermost/logr/go.sum | 174
-rw-r--r--  vendor/github.com/mattermost/logr/levelcache.go | 98
-rw-r--r--  vendor/github.com/mattermost/logr/levelcustom.go | 45
-rw-r--r--  vendor/github.com/mattermost/logr/levelstd.go | 37
-rw-r--r--  vendor/github.com/mattermost/logr/logger.go | 218
-rw-r--r--  vendor/github.com/mattermost/logr/logr.go | 664
-rw-r--r--  vendor/github.com/mattermost/logr/logrec.go | 189
-rw-r--r--  vendor/github.com/mattermost/logr/metrics.go | 117
-rw-r--r--  vendor/github.com/mattermost/logr/target.go | 299
-rw-r--r--  vendor/github.com/mattermost/logr/target/file.go | 87
-rw-r--r--  vendor/github.com/mattermost/logr/target/syslog.go | 89
-rw-r--r--  vendor/github.com/mattermost/logr/target/writer.go | 40
-rw-r--r--  vendor/github.com/mattermost/logr/timeout.go | 34
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/LICENSE.txt | 2
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/NOTICE.txt | 142
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/mlog/default.go | 44
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/mlog/errors.go | 30
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/mlog/global.go | 53
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/mlog/levels.go | 39
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/mlog/log.go | 161
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/mlog/logr.go | 247
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/mlog/syslog.go | 142
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/mlog/tcp.go | 274
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/mlog/test-tls-client-cert.pem | 43
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/mlog/testing.go | 3
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/bot.go | 7
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/channel.go | 18
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/channel_member_history_result.go | 7
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/channel_search.go | 17
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/channel_sidebar.go | 111
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/client4.go | 573
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/cluster_message.go | 10
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/command.go | 49
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/command_args.go | 4
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/config.go | 1033
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/file_info.go | 12
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/group.go | 11
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/integrity.go | 58
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/job.go | 6
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/ldap.go | 4
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/license.go | 14
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/link_metadata.go | 4
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/migration.go | 4
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/outgoing_webhook.go | 3
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/permission.go | 618
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/post.go | 16
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/preference.go | 1
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/product_notices.go | 213
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/push_notification.go | 1
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/role.go | 277
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/saml.go | 1
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/search_params.go | 11
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/serialized_gen.go | 1622
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/session.go | 24
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/status.go | 3
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/system.go | 151
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/team_search.go | 9
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/typing_request.go | 25
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/upload_session.go | 141
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/user.go | 6
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/user_count.go | 8
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/user_get.go | 8
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/user_search.go | 30
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/utils.go | 8
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/version.go | 3
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v5/model/websocket_message.go | 6
77 files changed, 8420 insertions, 781 deletions
diff --git a/vendor/github.com/mattermost/logr/.gitignore b/vendor/github.com/mattermost/logr/.gitignore
new file mode 100644
index 00000000..c2c0a9e2
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/.gitignore
@@ -0,0 +1,36 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+debug
+dynip
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Output of profiler
+*.prof
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+# IntelliJ config
+.idea
+
+# log files
+*.log
+
+# transient directories
+vendor
+output
+build
+app
+logs
+
+# test apps
+test/cmd/testapp1/testapp1
+test/cmd/simple/simple
diff --git a/vendor/github.com/mattermost/logr/.travis.yml b/vendor/github.com/mattermost/logr/.travis.yml
new file mode 100644
index 00000000..e6c7caf1
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/.travis.yml
@@ -0,0 +1,4 @@
+language: go
+sudo: false
+go:
+ - 1.x
\ No newline at end of file
diff --git a/vendor/github.com/mattermost/logr/LICENSE b/vendor/github.com/mattermost/logr/LICENSE
new file mode 100644
index 00000000..3bea6788
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 wiggin77
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/mattermost/logr/README.md b/vendor/github.com/mattermost/logr/README.md
new file mode 100644
index 00000000..a25d6de0
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/README.md
@@ -0,0 +1,193 @@
+# logr
+
+[![GoDoc](https://godoc.org/github.com/mattermost/logr?status.svg)](http://godoc.org/github.com/mattermost/logr)
+[![Report Card](https://goreportcard.com/badge/github.com/mattermost/logr)](https://goreportcard.com/report/github.com/mattermost/logr)
+
+Logr is a fully asynchronous, contextual logger for Go.
+
+It is very much inspired by [Logrus](https://github.com/sirupsen/logrus) but addresses two issues:
+
+1. Logr is fully asynchronous, meaning that all formatting and writing is done in the background. Latency sensitive applications benefit from not waiting for logging to complete.
+
+2. Logr provides custom filters which provide more flexibility than Trace, Debug, Info... levels. If you need to temporarily increase verbosity of logging while tracking down a problem you can avoid the fire-hose that typically comes from Debug or Trace by using custom filters.
+
+## Concepts
+
+<!-- markdownlint-disable MD033 -->
+| entity | description |
+| ------ | ----------- |
+| Logr | Engine instance typically instantiated once; used to configure logging.<br>```lgr := &Logr{}```|
+| Logger | Provides contextual logging via fields; lightweight, can be created once and accessed globally or created on demand.<br>```logger := lgr.NewLogger()```<br>```logger2 := logger.WithField("user", "Sam")```|
+| Target | A destination for log items such as console, file, database or just about anything that can be written to. Each target has its own filter/level and formatter, and any number of targets can be added to a Logr. Targets for syslog and any io.Writer are built-in and it is easy to create your own. You can also use any [Logrus hooks](https://github.com/sirupsen/logrus/wiki/Hooks) via a simple [adapter](https://github.com/wiggin77/logrus4logr).|
+| Filter | Determines which logging calls get written versus filtered out. Also determines which logging calls generate a stack trace.<br>```filter := &logr.StdFilter{Lvl: logr.Warn, Stacktrace: logr.Fatal}```|
+| Formatter | Formats the output. Logr includes built-in formatters for JSON and plain text with delimiters. It is easy to create your own formatters or you can also use any [Logrus formatters](https://github.com/sirupsen/logrus#formatters) via a simple [adapter](https://github.com/wiggin77/logrus4logr).<br>```formatter := &format.Plain{Delim: " \| "}```|
+
+## Usage
+
+```go
+// Create Logr instance.
+lgr := &logr.Logr{}
+
+// Create a filter and formatter. Both can be shared by multiple
+// targets.
+filter := &logr.StdFilter{Lvl: logr.Warn, Stacktrace: logr.Error}
+formatter := &format.Plain{Delim: " | "}
+
+// WriterTarget outputs to any io.Writer
+t := target.NewWriterTarget(filter, formatter, os.Stdout, 1000)
+lgr.AddTarget(t)
+
+// One or more Loggers can be created, shared, used concurrently,
+// or created on demand.
+logger := lgr.NewLogger().WithField("user", "Sarah")
+
+// Now we can log to the target(s).
+logger.Debug("login attempt")
+logger.Error("login failed")
+
+// Ensure targets are drained before application exit.
+lgr.Shutdown()
+```
+
+## Fields
+
+Fields allow for contextual logging, meaning information can be added to log statements without changing the statements themselves. Information can be shared across multiple logging statements thus allowing log analysis tools to group them.
+
+Fields are added via Loggers:
+
+```go
+lgr := &Logr{}
+// ... add targets ...
+logger := lgr.NewLogger().WithFields(logr.Fields{
+ "user": user,
+ "role": role})
+logger.Info("login attempt")
+// ... later ...
+logger.Info("login successful")
+```
+
+`Logger.WithFields` can be used to create additional Loggers that add more fields.
+
+Logr fields are inspired by and work the same as [Logrus fields](https://github.com/sirupsen/logrus#fields).
+
+## Filters
+
+Logr supports the traditional seven log levels via `logr.StdFilter`: Panic, Fatal, Error, Warning, Info, Debug, and Trace.
+
+```go
+// When added to a target, this filter will only allow
+// log statements with level severity Warn or higher.
+// It will also generate stack traces for Error or higher.
+filter := &logr.StdFilter{Lvl: logr.Warn, Stacktrace: logr.Error}
+```
+
+Logr also supports custom filters (`logr.CustomFilter`), which allow fine-grained inclusion of log items without turning on the fire-hose.
+
+```go
+ // create custom levels; use IDs > 10.
+ LoginLevel := logr.Level{ID: 100, Name: "login ", Stacktrace: false}
+ LogoutLevel := logr.Level{ID: 101, Name: "logout", Stacktrace: false}
+
+ lgr := &logr.Logr{}
+
+ // create a custom filter with custom levels.
+ filter := &logr.CustomFilter{}
+ filter.Add(LoginLevel, LogoutLevel)
+
+ formatter := &format.Plain{Delim: " | "}
+ tgr := target.NewWriterTarget(filter, formatter, os.Stdout, 1000)
+ lgr.AddTarget(tgr)
+ logger := lgr.NewLogger().WithFields(logr.Fields{"user": "Bob", "role": "admin"})
+
+ logger.Log(LoginLevel, "this item will get logged")
+ logger.Debug("won't be logged since Debug wasn't added to custom filter")
+```
+
+Both filter types allow you to determine which levels require a stack trace to be output. Note that generating stack traces cannot happen fully asynchronously and thus adds latency to the calling goroutine.
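For example (a sketch; the level ID and name below are arbitrary), a custom level can opt in to stack traces on its own by setting `Stacktrace: true`:

```go
// AuditLevel captures a stack trace every time it is logged.
AuditLevel := logr.Level{ID: 110, Name: "audit", Stacktrace: true}

filter := &logr.CustomFilter{}
filter.Add(AuditLevel)
```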
+
+## Targets
+
+There are built-in targets for outputting to syslog, file, or any `io.Writer`. More will be added.
+
+You can use any [Logrus hooks](https://github.com/sirupsen/logrus/wiki/Hooks) via a simple [adapter](https://github.com/wiggin77/logrus4logr).
+
+You can create your own target by implementing the [Target](./target.go) interface.
+
+An easier method is to use the [logr.Basic](./target.go) target type and build your functionality on that. Basic handles all the queuing and other plumbing so you only need to implement two methods. Example target that outputs to `io.Writer`:
+
+```go
+type Writer struct {
+ logr.Basic
+ out io.Writer
+}
+
+func NewWriterTarget(filter logr.Filter, formatter logr.Formatter, out io.Writer, maxQueue int) *Writer {
+ w := &Writer{out: out}
+ w.Basic.Start(w, w, filter, formatter, maxQueue)
+ return w
+}
+
+// Write will always be called by a single goroutine, so no locking needed.
+// Just convert a log record to a []byte using the formatter and output the
+// bytes to your sink.
+func (w *Writer) Write(rec *logr.LogRec) error {
+ _, stacktrace := w.IsLevelEnabled(rec.Level())
+
+ // take a buffer from the pool to avoid allocations or just allocate a new one.
+ buf := rec.Logger().Logr().BorrowBuffer()
+ defer rec.Logger().Logr().ReleaseBuffer(buf)
+
+ buf, err := w.Formatter().Format(rec, stacktrace, buf)
+ if err != nil {
+ return err
+ }
+ _, err = w.out.Write(buf.Bytes())
+ return err
+}
+```
+
+## Formatters
+
+Logr has two built-in formatters, one for JSON and the other plain, delimited text.
+
+You can use any [Logrus formatters](https://github.com/sirupsen/logrus#formatters) via a simple [adapter](https://github.com/wiggin77/logrus4logr).
+
+You can create your own formatter by implementing the [Formatter](./formatter.go) interface:
+
+```go
+Format(rec *LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error)
+```
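A minimal sketch of such a formatter, assuming the usual imports (`bytes`, `fmt`, `github.com/mattermost/logr`) and reusing the exported `logr.WriteFields` and `logr.WriteStacktrace` helpers:

```go
// KeyValue renders each record as level/msg plus sorted key=value context fields.
type KeyValue struct{}

func (k *KeyValue) Format(rec *logr.LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) {
	if buf == nil {
		buf = &bytes.Buffer{}
	}
	fmt.Fprintf(buf, "level=%v msg=%q ", rec.Level(), rec.Msg())
	logr.WriteFields(buf, rec.Fields(), " ")
	if stacktrace {
		buf.WriteString("\n")
		logr.WriteStacktrace(buf, rec.StackFrames())
	}
	buf.WriteString("\n")
	return buf, nil
}
```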
+
+## Handlers
+
+When creating the Logr instance, you can add several handlers that get called when exceptional events occur:
+
+### ```Logr.OnLoggerError(err error)```
+
+Called any time an internal logging error occurs. For example, this can happen when a target cannot connect to its data sink.
+
+It may be tempting to log this error; however, there is a danger that logging it will simply generate another error and so on. If you must log it, use a target and custom level specifically for this event and ensure it cannot generate more errors.
+
+### ```Logr.OnQueueFull func(rec *LogRec, maxQueueSize int) bool```
+
+Called on an attempt to add a log record to a full Logr queue. This generally means the Logr maximum queue size is too small, or at least one target is very slow. Logr maximum queue size can be changed before adding any targets via:
+
+```go
+lgr := logr.Logr{MaxQueueSize: 10000}
+```
+
+Returning true will drop the log record. False will block until the log record can be added, which creates a natural throttle at the expense of latency for the calling goroutine. The default is to block.
+
+### ```Logr.OnTargetQueueFull func(target Target, rec *LogRec, maxQueueSize int) bool```
+
+Called on an attempt to add a log record to a full target queue. This generally means your target's max queue size is too small, or the target is very slow to output.
+
+As with the Logr queue, returning true will drop the log record. False will block until the log record can be added, which creates a natural throttle at the expense of latency for the calling goroutine. The default is to block.
+
+### ```Logr.OnExit func(code int) and Logr.OnPanic func(err interface{})```
+
+OnExit and OnPanic are called when the Logger.FatalXXX and Logger.PanicXXX functions are called respectively.
+
+In both cases the default behavior is to shut down gracefully, draining all targets, and calling `os.Exit` or `panic` respectively.
+
+When adding your own handlers, be sure to call `Logr.Shutdown` before exiting the application to avoid losing log records.
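A sketch of wiring these handlers up, assuming they are assignable fields on `Logr` as the signatures above suggest (the recovery choices shown are only illustrative):

```go
lgr := &logr.Logr{MaxQueueSize: 10000}

// Send internal logging errors somewhere that cannot itself feed back into logging.
lgr.OnLoggerError = func(err error) {
	fmt.Fprintln(os.Stderr, "logr error:", err)
}

// Prefer dropping records over blocking callers when a target falls behind.
lgr.OnTargetQueueFull = func(target logr.Target, rec *logr.LogRec, maxQueueSize int) bool {
	return true
}
```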
diff --git a/vendor/github.com/mattermost/logr/config.go b/vendor/github.com/mattermost/logr/config.go
new file mode 100644
index 00000000..83d4b0c1
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/config.go
@@ -0,0 +1,11 @@
+package logr
+
+import (
+ "fmt"
+
+ "github.com/wiggin77/cfg"
+)
+
+func ConfigLogger(config *cfg.Config) error {
+ return fmt.Errorf("Not implemented yet")
+}
diff --git a/vendor/github.com/mattermost/logr/const.go b/vendor/github.com/mattermost/logr/const.go
new file mode 100644
index 00000000..704d0507
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/const.go
@@ -0,0 +1,34 @@
+package logr
+
+import "time"
+
+// Defaults.
+const (
+ // DefaultMaxQueueSize is the default maximum queue size for Logr instances.
+ DefaultMaxQueueSize = 1000
+
+ // DefaultMaxStackFrames is the default maximum number of stack frames collected
+ // when generating stack traces for logging.
+ DefaultMaxStackFrames = 30
+
+ // MaxLevelID is the maximum value of a level ID. Some level cache implementations will
+ // allocate a cache of this size. Cannot exceed uint.
+ MaxLevelID = 256
+
+ // DefaultEnqueueTimeout is the default amount of time a log record can take to be queued.
+ // This only applies to blocking enqueues, which happen after `logr.OnQueueFull` is called
+ // and returns false.
+ DefaultEnqueueTimeout = time.Second * 30
+
+ // DefaultShutdownTimeout is the default amount of time `logr.Shutdown` can execute before
+ // timing out.
+ DefaultShutdownTimeout = time.Second * 30
+
+ // DefaultFlushTimeout is the default amount of time `logr.Flush` can execute before
+ // timing out.
+ DefaultFlushTimeout = time.Second * 30
+
+ // DefaultMaxPooledBuffer is the maximum size a pooled buffer can be.
+ // Buffers that grow beyond this size are garbage collected.
+ DefaultMaxPooledBuffer = 1024 * 1024
+)
diff --git a/vendor/github.com/mattermost/logr/filter.go b/vendor/github.com/mattermost/logr/filter.go
new file mode 100644
index 00000000..6e654cd7
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/filter.go
@@ -0,0 +1,26 @@
+package logr
+
+// LevelID is the unique id of each level.
+type LevelID uint
+
+// Level provides a mechanism to enable/disable specific log lines.
+type Level struct {
+ ID LevelID
+ Name string
+ Stacktrace bool
+}
+
+// String returns the name of this level.
+func (level Level) String() string {
+ return level.Name
+}
+
+// Filter allows targets to determine which Level(s) are active
+// for logging and which Level(s) require a stack trace to be output.
+// A default implementation using "panic, fatal..." is provided, and
+// a more flexible alternative implementation is also provided that
+// allows any number of custom levels.
+type Filter interface {
+ IsEnabled(Level) bool
+ IsStacktraceEnabled(Level) bool
+}
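As a sketch, the interface can be satisfied by something as small as a filter that enables every level and never requests stack traces (written from a consuming package):

```go
// allowAll is a trivial logr.Filter: every level is logged, none capture a stack.
type allowAll struct{}

func (allowAll) IsEnabled(level logr.Level) bool           { return true }
func (allowAll) IsStacktraceEnabled(level logr.Level) bool { return false }
```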
diff --git a/vendor/github.com/mattermost/logr/format/json.go b/vendor/github.com/mattermost/logr/format/json.go
new file mode 100644
index 00000000..8f56c6cb
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/format/json.go
@@ -0,0 +1,273 @@
+package format
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/francoispqt/gojay"
+ "github.com/mattermost/logr"
+)
+
+// ContextField is a name/value pair within the context fields.
+type ContextField struct {
+ Key string
+ Val interface{}
+}
+
+// JSON formats log records as JSON.
+type JSON struct {
+ // DisableTimestamp disables output of timestamp field.
+ DisableTimestamp bool
+ // DisableLevel disables output of level field.
+ DisableLevel bool
+ // DisableMsg disables output of msg field.
+ DisableMsg bool
+ // DisableContext disables output of all context fields.
+ DisableContext bool
+ // DisableStacktrace disables output of stack trace.
+ DisableStacktrace bool
+
+ // TimestampFormat is an optional format for timestamps. If empty
+ // then DefTimestampFormat is used.
+ TimestampFormat string
+
+ // Deprecated: this has no effect.
+ Indent string
+
+ // EscapeHTML determines if certain characters (e.g. `<`, `>`, `&`)
+ // are escaped.
+ EscapeHTML bool
+
+ // KeyTimestamp overrides the timestamp field key name.
+ KeyTimestamp string
+
+ // KeyLevel overrides the level field key name.
+ KeyLevel string
+
+ // KeyMsg overrides the msg field key name.
+ KeyMsg string
+
+ // KeyContextFields when not empty will group all context fields
+ // under this key.
+ KeyContextFields string
+
+ // KeyStacktrace overrides the stacktrace field key name.
+ KeyStacktrace string
+
+ // ContextSorter allows custom sorting for the context fields.
+ ContextSorter func(fields logr.Fields) []ContextField
+
+ once sync.Once
+}
+
+// Format converts a log record to bytes in JSON format.
+func (j *JSON) Format(rec *logr.LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) {
+ j.once.Do(j.applyDefaultKeyNames)
+
+ if buf == nil {
+ buf = &bytes.Buffer{}
+ }
+ enc := gojay.BorrowEncoder(buf)
+ defer func() {
+ enc.Release()
+ }()
+
+ sorter := j.ContextSorter
+ if sorter == nil {
+ sorter = j.defaultContextSorter
+ }
+
+ jlr := JSONLogRec{
+ LogRec: rec,
+ JSON: j,
+ stacktrace: stacktrace,
+ sorter: sorter,
+ }
+
+ err := enc.EncodeObject(jlr)
+ if err != nil {
+ return nil, err
+ }
+ buf.WriteByte('\n')
+ return buf, nil
+}
+
+func (j *JSON) applyDefaultKeyNames() {
+ if j.KeyTimestamp == "" {
+ j.KeyTimestamp = "timestamp"
+ }
+ if j.KeyLevel == "" {
+ j.KeyLevel = "level"
+ }
+ if j.KeyMsg == "" {
+ j.KeyMsg = "msg"
+ }
+ if j.KeyStacktrace == "" {
+ j.KeyStacktrace = "stacktrace"
+ }
+}
+
+// defaultContextSorter sorts the context fields alphabetically by key.
+func (j *JSON) defaultContextSorter(fields logr.Fields) []ContextField {
+ keys := make([]string, 0, len(fields))
+ for k := range fields {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ cf := make([]ContextField, 0, len(keys))
+ for _, k := range keys {
+ cf = append(cf, ContextField{Key: k, Val: fields[k]})
+ }
+ return cf
+}
+
+// JSONLogRec decorates a LogRec adding JSON encoding.
+type JSONLogRec struct {
+ *logr.LogRec
+ *JSON
+ stacktrace bool
+ sorter func(fields logr.Fields) []ContextField
+}
+
+// MarshalJSONObject encodes the LogRec as JSON.
+func (rec JSONLogRec) MarshalJSONObject(enc *gojay.Encoder) {
+ if !rec.DisableTimestamp {
+ timestampFmt := rec.TimestampFormat
+ if timestampFmt == "" {
+ timestampFmt = logr.DefTimestampFormat
+ }
+ time := rec.Time()
+ enc.AddTimeKey(rec.KeyTimestamp, &time, timestampFmt)
+ }
+ if !rec.DisableLevel {
+ enc.AddStringKey(rec.KeyLevel, rec.Level().Name)
+ }
+ if !rec.DisableMsg {
+ enc.AddStringKey(rec.KeyMsg, rec.Msg())
+ }
+ if !rec.DisableContext {
+ ctxFields := rec.sorter(rec.Fields())
+ if rec.KeyContextFields != "" {
+ enc.AddObjectKey(rec.KeyContextFields, jsonFields(ctxFields))
+ } else {
+ if len(ctxFields) > 0 {
+ for _, cf := range ctxFields {
+ key := rec.prefixCollision(cf.Key)
+ encodeField(enc, key, cf.Val)
+ }
+ }
+ }
+ }
+ if rec.stacktrace && !rec.DisableStacktrace {
+ frames := rec.StackFrames()
+ if len(frames) > 0 {
+ enc.AddArrayKey(rec.KeyStacktrace, stackFrames(frames))
+ }
+ }
+
+}
+
+// IsNil returns true if the LogRec pointer is nil.
+func (rec JSONLogRec) IsNil() bool {
+ return rec.LogRec == nil
+}
+
+func (rec JSONLogRec) prefixCollision(key string) string {
+ switch key {
+ case rec.KeyTimestamp, rec.KeyLevel, rec.KeyMsg, rec.KeyStacktrace:
+ return rec.prefixCollision("_" + key)
+ }
+ return key
+}
+
+type stackFrames []runtime.Frame
+
+// MarshalJSONArray encodes stackFrames slice as JSON.
+func (s stackFrames) MarshalJSONArray(enc *gojay.Encoder) {
+ for _, frame := range s {
+ enc.AddObject(stackFrame(frame))
+ }
+}
+
+// IsNil returns true if stackFrames is empty slice.
+func (s stackFrames) IsNil() bool {
+ return len(s) == 0
+}
+
+type stackFrame runtime.Frame
+
+// MarshalJSONArray encodes stackFrame as JSON.
+func (f stackFrame) MarshalJSONObject(enc *gojay.Encoder) {
+ enc.AddStringKey("Function", f.Function)
+ enc.AddStringKey("File", f.File)
+ enc.AddIntKey("Line", f.Line)
+}
+
+func (f stackFrame) IsNil() bool {
+ return false
+}
+
+type jsonFields []ContextField
+
+// MarshalJSONObject encodes Fields map to JSON.
+func (f jsonFields) MarshalJSONObject(enc *gojay.Encoder) {
+ for _, ctxField := range f {
+ encodeField(enc, ctxField.Key, ctxField.Val)
+ }
+}
+
+// IsNil returns true if map is nil.
+func (f jsonFields) IsNil() bool {
+ return f == nil
+}
+
+func encodeField(enc *gojay.Encoder, key string, val interface{}) {
+ switch vt := val.(type) {
+ case gojay.MarshalerJSONObject:
+ enc.AddObjectKey(key, vt)
+ case gojay.MarshalerJSONArray:
+ enc.AddArrayKey(key, vt)
+ case string:
+ enc.AddStringKey(key, vt)
+ case error:
+ enc.AddStringKey(key, vt.Error())
+ case bool:
+ enc.AddBoolKey(key, vt)
+ case int:
+ enc.AddIntKey(key, vt)
+ case int64:
+ enc.AddInt64Key(key, vt)
+ case int32:
+ enc.AddIntKey(key, int(vt))
+ case int16:
+ enc.AddIntKey(key, int(vt))
+ case int8:
+ enc.AddIntKey(key, int(vt))
+ case uint64:
+ enc.AddIntKey(key, int(vt))
+ case uint32:
+ enc.AddIntKey(key, int(vt))
+ case uint16:
+ enc.AddIntKey(key, int(vt))
+ case uint8:
+ enc.AddIntKey(key, int(vt))
+ case float64:
+ enc.AddFloatKey(key, vt)
+ case float32:
+ enc.AddFloat32Key(key, vt)
+ case *gojay.EmbeddedJSON:
+ enc.AddEmbeddedJSONKey(key, vt)
+ case time.Time:
+ enc.AddTimeKey(key, &vt, logr.DefTimestampFormat)
+ case *time.Time:
+ enc.AddTimeKey(key, vt, logr.DefTimestampFormat)
+ default:
+ s := fmt.Sprintf("%v", vt)
+ enc.AddStringKey(key, s)
+ }
+}
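A sketch of configuring this formatter from application code (assuming a `lgr` instance already exists), renaming the timestamp key and grouping context fields under a single object; target construction follows the README:

```go
formatter := &format.JSON{
	KeyTimestamp:     "ts",
	KeyContextFields: "fields",
	TimestampFormat:  "2006-01-02T15:04:05.000Z07:00",
}

filter := &logr.StdFilter{Lvl: logr.Info, Stacktrace: logr.Error}
lgr.AddTarget(target.NewWriterTarget(filter, formatter, os.Stdout, 1000))
```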
diff --git a/vendor/github.com/mattermost/logr/format/plain.go b/vendor/github.com/mattermost/logr/format/plain.go
new file mode 100644
index 00000000..3fa92b49
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/format/plain.go
@@ -0,0 +1,75 @@
+package format
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/mattermost/logr"
+)
+
+// Plain is the simplest formatter, outputting only text with
+// no colors.
+type Plain struct {
+ // DisableTimestamp disables output of timestamp field.
+ DisableTimestamp bool
+ // DisableLevel disables output of level field.
+ DisableLevel bool
+ // DisableMsg disables output of msg field.
+ DisableMsg bool
+ // DisableContext disables output of all context fields.
+ DisableContext bool
+ // DisableStacktrace disables output of stack trace.
+ DisableStacktrace bool
+
+ // Delim is an optional delimiter output between each log field.
+ // Defaults to a single space.
+ Delim string
+
+ // TimestampFormat is an optional format for timestamps. If empty
+ // then DefTimestampFormat is used.
+ TimestampFormat string
+}
+
+// Format converts a log record to bytes.
+func (p *Plain) Format(rec *logr.LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) {
+ delim := p.Delim
+ if delim == "" {
+ delim = " "
+ }
+ if buf == nil {
+ buf = &bytes.Buffer{}
+ }
+
+ timestampFmt := p.TimestampFormat
+ if timestampFmt == "" {
+ timestampFmt = logr.DefTimestampFormat
+ }
+
+ if !p.DisableTimestamp {
+ var arr [128]byte
+ tbuf := rec.Time().AppendFormat(arr[:0], timestampFmt)
+ buf.Write(tbuf)
+ buf.WriteString(delim)
+ }
+ if !p.DisableLevel {
+ fmt.Fprintf(buf, "%v%s", rec.Level().Name, delim)
+ }
+ if !p.DisableMsg {
+ fmt.Fprint(buf, rec.Msg(), delim)
+ }
+ if !p.DisableContext {
+ ctx := rec.Fields()
+ if len(ctx) > 0 {
+ logr.WriteFields(buf, ctx, " ")
+ }
+ }
+ if stacktrace && !p.DisableStacktrace {
+ frames := rec.StackFrames()
+ if len(frames) > 0 {
+ buf.WriteString("\n")
+ logr.WriteStacktrace(buf, rec.StackFrames())
+ }
+ }
+ buf.WriteString("\n")
+ return buf, nil
+}
diff --git a/vendor/github.com/mattermost/logr/formatter.go b/vendor/github.com/mattermost/logr/formatter.go
new file mode 100644
index 00000000..bb8df2d4
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/formatter.go
@@ -0,0 +1,119 @@
+package logr
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "runtime"
+ "sort"
+)
+
+// Formatter turns a LogRec into a formatted string.
+type Formatter interface {
+ // Format converts a log record to bytes. If buf is not nil then it will be
+ // filled with the formatted results, otherwise a new buffer will be allocated.
+ Format(rec *LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error)
+}
+
+const (
+ // DefTimestampFormat is the default time stamp format used by
+ // Plain formatter and others.
+ DefTimestampFormat = "2006-01-02 15:04:05.000 Z07:00"
+)
+
+// DefaultFormatter is the default formatter, outputting only text with
+// no colors and a space delimiter. Use `format.Plain` instead.
+type DefaultFormatter struct {
+}
+
+// Format converts a log record to bytes.
+func (p *DefaultFormatter) Format(rec *LogRec, stacktrace bool, buf *bytes.Buffer) (*bytes.Buffer, error) {
+ if buf == nil {
+ buf = &bytes.Buffer{}
+ }
+ delim := " "
+ timestampFmt := DefTimestampFormat
+
+ fmt.Fprintf(buf, "%s%s", rec.Time().Format(timestampFmt), delim)
+ fmt.Fprintf(buf, "%v%s", rec.Level(), delim)
+ fmt.Fprint(buf, rec.Msg(), delim)
+
+ ctx := rec.Fields()
+ if len(ctx) > 0 {
+ WriteFields(buf, ctx, " ")
+ }
+
+ if stacktrace {
+ frames := rec.StackFrames()
+ if len(frames) > 0 {
+ buf.WriteString("\n")
+ WriteStacktrace(buf, rec.StackFrames())
+ }
+ }
+ buf.WriteString("\n")
+
+ return buf, nil
+}
+
+// WriteFields writes zero or more name value pairs to the io.Writer.
+// The pairs are sorted by key name and output in key=value format
+// with optional separator between fields.
+func WriteFields(w io.Writer, flds Fields, separator string) {
+ keys := make([]string, 0, len(flds))
+ for k := range flds {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ sep := ""
+ for _, key := range keys {
+ writeField(w, key, flds[key], sep)
+ sep = separator
+ }
+}
+
+func writeField(w io.Writer, key string, val interface{}, sep string) {
+ var template string
+ switch v := val.(type) {
+ case error:
+ val := v.Error()
+ if shouldQuote(val) {
+ template = "%s%s=%q"
+ } else {
+ template = "%s%s=%s"
+ }
+ case string:
+ if shouldQuote(v) {
+ template = "%s%s=%q"
+ } else {
+ template = "%s%s=%s"
+ }
+ default:
+ template = "%s%s=%v"
+ }
+ fmt.Fprintf(w, template, sep, key, val)
+}
+
+// shouldQuote returns true if val contains any characters that might be unsafe
+// when injecting log output into an aggregator, viewer or report.
+func shouldQuote(val string) bool {
+ for _, c := range val {
+ if !((c >= '0' && c <= '9') ||
+ (c >= 'a' && c <= 'z') ||
+ (c >= 'A' && c <= 'Z')) {
+ return true
+ }
+ }
+ return false
+}
+
+// WriteStacktrace formats and outputs a stack trace to an io.Writer.
+func WriteStacktrace(w io.Writer, frames []runtime.Frame) {
+ for _, frame := range frames {
+ if frame.Function != "" {
+ fmt.Fprintf(w, " %s\n", frame.Function)
+ }
+ if frame.File != "" {
+ fmt.Fprintf(w, " %s:%d\n", frame.File, frame.Line)
+ }
+ }
+}
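A quick sketch of the resulting `key=value` output; quoting kicks in as soon as a value contains anything other than ASCII letters and digits:

```go
var buf bytes.Buffer
logr.WriteFields(&buf, logr.Fields{"user": "Sam", "path": "/tmp/x"}, " ")
// buf now holds: path="/tmp/x" user=Sam   (keys are sorted; only "path" needs quoting)
```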
diff --git a/vendor/github.com/mattermost/logr/go.mod b/vendor/github.com/mattermost/logr/go.mod
new file mode 100644
index 00000000..e8e8acfb
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/go.mod
@@ -0,0 +1,11 @@
+module github.com/mattermost/logr
+
+go 1.12
+
+require (
+ github.com/francoispqt/gojay v1.2.13
+ github.com/stretchr/testify v1.2.2
+ github.com/wiggin77/cfg v1.0.2
+ github.com/wiggin77/merror v1.0.2
+ gopkg.in/natefinch/lumberjack.v2 v2.0.0
+)
diff --git a/vendor/github.com/mattermost/logr/go.sum b/vendor/github.com/mattermost/logr/go.sum
new file mode 100644
index 00000000..ea688513
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/go.sum
@@ -0,0 +1,174 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
+dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
+dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
+dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
+dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
+git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
+github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
+github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
+github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
+github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
+github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
+github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
+github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
+github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
+github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
+github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
+github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
+github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
+github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
+github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
+github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
+github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
+github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
+github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
+github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
+github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
+github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
+github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
+github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
+github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
+github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
+github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
+github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
+github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
+github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
+github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
+github.com/wiggin77/cfg v1.0.2 h1:NBUX+iJRr+RTncTqTNvajHwzduqbhCQjEqxLHr6Fk7A=
+github.com/wiggin77/cfg v1.0.2/go.mod h1:b3gotba2e5bXTqTW48DwIFoLc+4lWKP7WPi/CdvZ4aE=
+github.com/wiggin77/merror v1.0.2 h1:V0nH9eFp64ASyaXC+pB5WpvBoCg7NUwvaCSKdzlcHqw=
+github.com/wiggin77/merror v1.0.2/go.mod h1:uQTcIU0Z6jRK4OwqganPYerzQxSFJ4GSHM3aurxxQpg=
+go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
+go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
+golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
+golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
+google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
+sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
diff --git a/vendor/github.com/mattermost/logr/levelcache.go b/vendor/github.com/mattermost/logr/levelcache.go
new file mode 100644
index 00000000..2cefb61d
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/levelcache.go
@@ -0,0 +1,98 @@
+package logr
+
+import (
+ "fmt"
+ "sync"
+)
+
+// LevelStatus represents whether a level is enabled and
+// requires a stack trace.
+type LevelStatus struct {
+ Enabled bool
+ Stacktrace bool
+ empty bool
+}
+
+type levelCache interface {
+ setup()
+ get(id LevelID) (LevelStatus, bool)
+ put(id LevelID, status LevelStatus) error
+ clear()
+}
+
+// syncMapLevelCache uses sync.Map which may better handle large concurrency
+// scenarios.
+type syncMapLevelCache struct {
+ m sync.Map
+}
+
+func (c *syncMapLevelCache) setup() {
+ c.clear()
+}
+
+func (c *syncMapLevelCache) get(id LevelID) (LevelStatus, bool) {
+ if id > MaxLevelID {
+ return LevelStatus{}, false
+ }
+ s, _ := c.m.Load(id)
+ status := s.(LevelStatus)
+ return status, !status.empty
+}
+
+func (c *syncMapLevelCache) put(id LevelID, status LevelStatus) error {
+ if id > MaxLevelID {
+ return fmt.Errorf("level id cannot exceed MaxLevelID (%d)", MaxLevelID)
+ }
+ c.m.Store(id, status)
+ return nil
+}
+
+func (c *syncMapLevelCache) clear() {
+ var i LevelID
+ for i = 0; i < MaxLevelID; i++ {
+ c.m.Store(i, LevelStatus{empty: true})
+ }
+}
+
+// arrayLevelCache using array and a mutex.
+type arrayLevelCache struct {
+ arr [MaxLevelID + 1]LevelStatus
+ mux sync.RWMutex
+}
+
+func (c *arrayLevelCache) setup() {
+ c.clear()
+}
+
+//var dummy = LevelStatus{}
+
+func (c *arrayLevelCache) get(id LevelID) (LevelStatus, bool) {
+ if id > MaxLevelID {
+ return LevelStatus{}, false
+ }
+ c.mux.RLock()
+ status := c.arr[id]
+ ok := !status.empty
+ c.mux.RUnlock()
+ return status, ok
+}
+
+func (c *arrayLevelCache) put(id LevelID, status LevelStatus) error {
+ if id > MaxLevelID {
+ return fmt.Errorf("level id cannot exceed MaxLevelID (%d)", MaxLevelID)
+ }
+ c.mux.Lock()
+ defer c.mux.Unlock()
+
+ c.arr[id] = status
+ return nil
+}
+
+func (c *arrayLevelCache) clear() {
+ c.mux.Lock()
+ defer c.mux.Unlock()
+
+ for i := range c.arr {
+ c.arr[i] = LevelStatus{empty: true}
+ }
+}
diff --git a/vendor/github.com/mattermost/logr/levelcustom.go b/vendor/github.com/mattermost/logr/levelcustom.go
new file mode 100644
index 00000000..384fe4e9
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/levelcustom.go
@@ -0,0 +1,45 @@
+package logr
+
+import (
+ "sync"
+)
+
+// CustomFilter allows targets to enable logging via a list of levels.
+type CustomFilter struct {
+ mux sync.RWMutex
+ levels map[LevelID]Level
+}
+
+// IsEnabled returns true if the specified Level exists in this list.
+func (st *CustomFilter) IsEnabled(level Level) bool {
+ st.mux.RLock()
+ defer st.mux.RUnlock()
+ _, ok := st.levels[level.ID]
+ return ok
+}
+
+// IsStacktraceEnabled returns true if the specified Level requires a stack trace.
+func (st *CustomFilter) IsStacktraceEnabled(level Level) bool {
+ st.mux.RLock()
+ defer st.mux.RUnlock()
+ lvl, ok := st.levels[level.ID]
+ if ok {
+ return lvl.Stacktrace
+ }
+ return false
+}
+
+// Add adds one or more levels to the list. Adding a level enables logging for
+// that level on any targets using this CustomFilter.
+func (st *CustomFilter) Add(levels ...Level) {
+ st.mux.Lock()
+ defer st.mux.Unlock()
+
+ if st.levels == nil {
+ st.levels = make(map[LevelID]Level)
+ }
+
+ for _, s := range levels {
+ st.levels[s.ID] = s
+ }
+}
diff --git a/vendor/github.com/mattermost/logr/levelstd.go b/vendor/github.com/mattermost/logr/levelstd.go
new file mode 100644
index 00000000..f5e0fa46
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/levelstd.go
@@ -0,0 +1,37 @@
+package logr
+
+// StdFilter allows targets to filter via classic log levels where any level
+// beyond a certain verbosity/severity is enabled.
+type StdFilter struct {
+ Lvl Level
+ Stacktrace Level
+}
+
+// IsEnabled returns true if the specified Level is at or above this verbosity. Also
+// determines if a stack trace is required.
+func (lt StdFilter) IsEnabled(level Level) bool {
+ return level.ID <= lt.Lvl.ID
+}
+
+// IsStacktraceEnabled returns true if the specified Level requires a stack trace.
+func (lt StdFilter) IsStacktraceEnabled(level Level) bool {
+ return level.ID <= lt.Stacktrace.ID
+}
+
+var (
+ // Panic is the highest level of severity. Logs the message and then panics.
+ Panic = Level{ID: 0, Name: "panic"}
+ // Fatal designates a catastrophic error. Logs the message and then calls
+ // `logr.Exit(1)`.
+ Fatal = Level{ID: 1, Name: "fatal"}
+ // Error designates a serious but possibly recoverable error.
+ Error = Level{ID: 2, Name: "error"}
+ // Warn designates non-critical error.
+ Warn = Level{ID: 3, Name: "warn"}
+ // Info designates information regarding application events.
+ Info = Level{ID: 4, Name: "info"}
+ // Debug designates verbose information typically used for debugging.
+ Debug = Level{ID: 5, Name: "debug"}
+ // Trace designates the highest verbosity of log output.
+ Trace = Level{ID: 6, Name: "trace"}
+)
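To illustrate the semantics above (lower IDs are more severe), a small sketch of StdFilter in isolation:

package main

import (
    "fmt"

    "github.com/mattermost/logr"
)

func main() {
    // Info and anything more severe passes; stack traces for Error and above.
    filter := logr.StdFilter{Lvl: logr.Info, Stacktrace: logr.Error}

    fmt.Println(filter.IsEnabled(logr.Warn))            // true  (3 <= 4)
    fmt.Println(filter.IsEnabled(logr.Debug))           // false (5 > 4)
    fmt.Println(filter.IsStacktraceEnabled(logr.Fatal)) // true  (1 <= 2)
}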
diff --git a/vendor/github.com/mattermost/logr/logger.go b/vendor/github.com/mattermost/logr/logger.go
new file mode 100644
index 00000000..c2386312
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/logger.go
@@ -0,0 +1,218 @@
+package logr
+
+import (
+ "fmt"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Logger provides context for logging via fields.
+type Logger struct {
+ logr *Logr
+ fields Fields
+}
+
+// Logr returns the `Logr` instance that created this `Logger`.
+func (logger Logger) Logr() *Logr {
+ return logger.logr
+}
+
+// WithField creates a new `Logger` with any existing fields
+// plus the new one.
+func (logger Logger) WithField(key string, value interface{}) Logger {
+ return logger.WithFields(Fields{key: value})
+}
+
+// WithFields creates a new `Logger` with any existing fields
+// plus the new ones.
+func (logger Logger) WithFields(fields Fields) Logger {
+ l := Logger{logr: logger.logr}
+ // if parent has no fields then avoid creating a new map.
+ oldLen := len(logger.fields)
+ if oldLen == 0 {
+ l.fields = fields
+ return l
+ }
+
+ l.fields = make(Fields, len(fields)+oldLen)
+ for k, v := range logger.fields {
+ l.fields[k] = v
+ }
+ for k, v := range fields {
+ l.fields[k] = v
+ }
+ return l
+}
+
+// Log checks that the level matches one or more targets, and
+// if so, generates a log record that is added to the Logr queue.
+// Arguments are handled in the manner of fmt.Print.
+func (logger Logger) Log(lvl Level, args ...interface{}) {
+ status := logger.logr.IsLevelEnabled(lvl)
+ if status.Enabled {
+ rec := NewLogRec(lvl, logger, "", args, status.Stacktrace)
+ logger.logr.enqueue(rec)
+ }
+}
+
+// Trace is a convenience method equivalent to `Log(TraceLevel, args...)`.
+func (logger Logger) Trace(args ...interface{}) {
+ logger.Log(Trace, args...)
+}
+
+// Debug is a convenience method equivalent to `Log(DebugLevel, args...)`.
+func (logger Logger) Debug(args ...interface{}) {
+ logger.Log(Debug, args...)
+}
+
+// Print ensures compatibility with std lib logger.
+func (logger Logger) Print(args ...interface{}) {
+ logger.Info(args...)
+}
+
+// Info is a convenience method equivalent to `Log(InfoLevel, args...)`.
+func (logger Logger) Info(args ...interface{}) {
+ logger.Log(Info, args...)
+}
+
+// Warn is a convenience method equivalent to `Log(WarnLevel, args...)`.
+func (logger Logger) Warn(args ...interface{}) {
+ logger.Log(Warn, args...)
+}
+
+// Error is a convenience method equivalent to `Log(ErrorLevel, args...)`.
+func (logger Logger) Error(args ...interface{}) {
+ logger.Log(Error, args...)
+}
+
+// Fatal is a convenience method equivalent to `Log(FatalLevel, args...)`
+// followed by a call to os.Exit(1).
+func (logger Logger) Fatal(args ...interface{}) {
+ logger.Log(Fatal, args...)
+ logger.logr.exit(1)
+}
+
+// Panic is a convenience method equivalent to `Log(PanicLevel, args...)`
+// followed by a call to panic().
+func (logger Logger) Panic(args ...interface{}) {
+ logger.Log(Panic, args...)
+ panic(fmt.Sprint(args...))
+}
+
+//
+// Printf style
+//
+
+// Logf checks that the level matches one or more targets, and
+// if so, generates a log record that is added to the main
+// queue (channel). Arguments are handled in the manner of fmt.Printf.
+func (logger Logger) Logf(lvl Level, format string, args ...interface{}) {
+ status := logger.logr.IsLevelEnabled(lvl)
+ if status.Enabled {
+ rec := NewLogRec(lvl, logger, format, args, status.Stacktrace)
+ logger.logr.enqueue(rec)
+ }
+}
+
+// Tracef is a convenience method equivalent to `Logf(TraceLevel, args...)`.
+func (logger Logger) Tracef(format string, args ...interface{}) {
+ logger.Logf(Trace, format, args...)
+}
+
+// Debugf is a convenience method equivalent to `Logf(DebugLevel, args...)`.
+func (logger Logger) Debugf(format string, args ...interface{}) {
+ logger.Logf(Debug, format, args...)
+}
+
+// Infof is a convenience method equivalent to `Logf(InfoLevel, args...)`.
+func (logger Logger) Infof(format string, args ...interface{}) {
+ logger.Logf(Info, format, args...)
+}
+
+// Printf ensures compatibility with std lib logger.
+func (logger Logger) Printf(format string, args ...interface{}) {
+ logger.Infof(format, args...)
+}
+
+// Warnf is a convenience method equivalent to `Logf(WarnLevel, args...)`.
+func (logger Logger) Warnf(format string, args ...interface{}) {
+ logger.Logf(Warn, format, args...)
+}
+
+// Errorf is a convenience method equivalent to `Logf(ErrorLevel, args...)`.
+func (logger Logger) Errorf(format string, args ...interface{}) {
+ logger.Logf(Error, format, args...)
+}
+
+// Fatalf is a convenience method equivalent to `Logf(FatalLevel, args...)`
+// followed by a call to os.Exit(1).
+func (logger Logger) Fatalf(format string, args ...interface{}) {
+ logger.Logf(Fatal, format, args...)
+ logger.logr.exit(1)
+}
+
+// Panicf is a convenience method equivalent to `Logf(PanicLevel, args...)`
+// followed by a call to panic().
+func (logger Logger) Panicf(format string, args ...interface{}) {
+ logger.Logf(Panic, format, args...)
+}
+
+//
+// Println style
+//
+
+// Logln checks that the level matches one or more targets, and
+// if so, generates a log record that is added to the main
+// queue (channel). Arguments are handled in the manner of fmt.Println.
+func (logger Logger) Logln(lvl Level, args ...interface{}) {
+ status := logger.logr.IsLevelEnabled(lvl)
+ if status.Enabled {
+ rec := NewLogRec(lvl, logger, "", args, status.Stacktrace)
+ rec.newline = true
+ logger.logr.enqueue(rec)
+ }
+}
+
+// Traceln is a convenience method equivalent to `Logln(TraceLevel, args...)`.
+func (logger Logger) Traceln(args ...interface{}) {
+ logger.Logln(Trace, args...)
+}
+
+// Debugln is a convenience method equivalent to `Logln(DebugLevel, args...)`.
+func (logger Logger) Debugln(args ...interface{}) {
+ logger.Logln(Debug, args...)
+}
+
+// Infoln is a convenience method equivalent to `Logln(InfoLevel, args...)`.
+func (logger Logger) Infoln(args ...interface{}) {
+ logger.Logln(Info, args...)
+}
+
+// Println ensures compatibility with std lib logger.
+func (logger Logger) Println(args ...interface{}) {
+ logger.Infoln(args...)
+}
+
+// Warnln is a convenience method equivalent to `Logln(WarnLevel, args...)`.
+func (logger Logger) Warnln(args ...interface{}) {
+ logger.Logln(Warn, args...)
+}
+
+// Errorln is a convenience method equivalent to `Logln(ErrorLevel, args...)`.
+func (logger Logger) Errorln(args ...interface{}) {
+ logger.Logln(Error, args...)
+}
+
+// Fatalln is a convenience method equivalent to `Logln(FatalLevel, args...)`
+// followed by a call to os.Exit(1).
+func (logger Logger) Fatalln(args ...interface{}) {
+ logger.Logln(Fatal, args...)
+ logger.logr.exit(1)
+}
+
+// Panicln is a convenience method equivalent to `Logln(PanicLevel, args...)`
+// followed by a call to panic().
+func (logger Logger) Panicln(args ...interface{}) {
+ logger.Logln(Panic, args...)
+}
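A sketch of typical Logger usage with contextual fields, assuming a writer target on stdout and the default formatter:

package main

import (
    "os"

    "github.com/mattermost/logr"
    "github.com/mattermost/logr/target"
)

func main() {
    lgr := &logr.Logr{}
    filter := logr.StdFilter{Lvl: logr.Info, Stacktrace: logr.Error}
    _ = lgr.AddTarget(target.NewWriterTarget(filter, &logr.DefaultFormatter{}, os.Stdout, 1000))

    // Loggers are cheap value types; derive them with contextual fields.
    logger := lgr.NewLogger().WithFields(logr.Fields{"user": "alice", "conn_id": 42})
    logger.Info("user connected")
    logger.Errorf("sync failed after %d attempts", 3)

    _ = lgr.Shutdown()
}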
diff --git a/vendor/github.com/mattermost/logr/logr.go b/vendor/github.com/mattermost/logr/logr.go
new file mode 100644
index 00000000..631366a5
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/logr.go
@@ -0,0 +1,664 @@
+package logr
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/wiggin77/cfg"
+ "github.com/wiggin77/merror"
+)
+
+// Logr maintains a list of log targets and accepts incoming
+// log records.
+type Logr struct {
+ tmux sync.RWMutex // target mutex
+ targets []Target
+
+ mux sync.RWMutex
+ maxQueueSizeActual int
+ in chan *LogRec
+ done chan struct{}
+ once sync.Once
+ shutdown bool
+ lvlCache levelCache
+
+ metricsInitOnce sync.Once
+ metricsCloseOnce sync.Once
+ metricsDone chan struct{}
+ metrics MetricsCollector
+ queueSizeGauge Gauge
+ loggedCounter Counter
+ errorCounter Counter
+
+ bufferPool sync.Pool
+
+ // MaxQueueSize is the maximum number of log records that can be queued.
+ // If exceeded, `OnQueueFull` is called, which determines whether the log
+ // record is dropped or blocks until it can be added.
+ // If this is modified, it must be done before `Configure` or
+ // `AddTarget`. Defaults to DefaultMaxQueueSize.
+ MaxQueueSize int
+
+ // OnLoggerError, when not nil, is called any time an internal
+ // logging error occurs. For example, this can happen when a
+ // target cannot connect to its data sink.
+ OnLoggerError func(error)
+
+ // OnQueueFull, when not nil, is called on an attempt to add
+ // a log record to a full Logr queue.
+ // `MaxQueueSize` can be used to modify the maximum queue size.
+ // This function should return quickly, with a bool indicating whether
+ // the log record should be dropped (true) or block until the log record
+ // is successfully added (false). If nil then blocking (false) is assumed.
+ OnQueueFull func(rec *LogRec, maxQueueSize int) bool
+
+ // OnTargetQueueFull, when not nil, is called on an attempt to add
+ // a log record to a full target queue provided the target supports reporting
+ // this condition.
+ // This function should return quickly, with a bool indicating whether
+ // the log record should be dropped (true) or block until the log record
+ // is successfully added (false). If nil then blocking (false) is assumed.
+ OnTargetQueueFull func(target Target, rec *LogRec, maxQueueSize int) bool
+
+ // OnExit, when not nil, is called when a FatalXXX style log API is called.
+ // When nil, then the default behavior is to cleanly shut down this Logr and
+ // call `os.Exit(code)`.
+ OnExit func(code int)
+
+ // OnPanic, when not nil, is called when a PanicXXX style log API is called.
+ // When nil, then the default behavior is to cleanly shut down this Logr and
+ // call `panic(err)`.
+ OnPanic func(err interface{})
+
+ // EnqueueTimeout is the amount of time a log record can take to be queued.
+ // This only applies to a blocking enqueue, which happens after `logr.OnQueueFull`
+ // is called and returns false.
+ EnqueueTimeout time.Duration
+
+ // ShutdownTimeout is the amount of time `logr.Shutdown` can execute before
+ // timing out.
+ ShutdownTimeout time.Duration
+
+ // FlushTimeout is the amount of time `logr.Flush` can execute before
+ // timing out.
+ FlushTimeout time.Duration
+
+ // UseSyncMapLevelCache can be set to true before the first target is added
+ // when high concurrency (e.g. >32 cores) is expected. This may improve
+ // performance with large numbers of cores - benchmark for your use case.
+ UseSyncMapLevelCache bool
+
+ // MaxPooledBuffer determines the maximum size of a buffer that can be
+ // pooled. To reduce allocations, the buffers needed during formatting (etc.)
+ // are pooled. A very large log item will grow a buffer that could stay in
+ // memory indefinitely. This setting lets you control how big a pooled buffer
+ // can be - anything larger will be garbage collected after use.
+ // Defaults to 1MB.
+ MaxPooledBuffer int
+
+ // DisableBufferPool when true disables the buffer pool. See MaxPooledBuffer.
+ DisableBufferPool bool
+
+ // MetricsUpdateFreqMillis determines how often polled metrics are updated
+ // when metrics are enabled.
+ MetricsUpdateFreqMillis int64
+}
+
+// Configure adds/removes targets via the supplied `Config`.
+func (logr *Logr) Configure(config *cfg.Config) error {
+ // TODO
+ return fmt.Errorf("not implemented yet")
+}
+
+func (logr *Logr) ensureInit() {
+ logr.once.Do(func() {
+ defer func() {
+ go logr.start()
+ }()
+
+ logr.mux.Lock()
+ defer logr.mux.Unlock()
+
+ logr.maxQueueSizeActual = logr.MaxQueueSize
+ if logr.maxQueueSizeActual == 0 {
+ logr.maxQueueSizeActual = DefaultMaxQueueSize
+ }
+
+ if logr.maxQueueSizeActual < 0 {
+ logr.maxQueueSizeActual = 0
+ }
+
+ logr.in = make(chan *LogRec, logr.maxQueueSizeActual)
+ logr.done = make(chan struct{})
+
+ if logr.UseSyncMapLevelCache {
+ logr.lvlCache = &syncMapLevelCache{}
+ } else {
+ logr.lvlCache = &arrayLevelCache{}
+ }
+
+ if logr.MaxPooledBuffer == 0 {
+ logr.MaxPooledBuffer = DefaultMaxPooledBuffer
+ }
+ logr.bufferPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+ }
+
+ logr.lvlCache.setup()
+ })
+}
+
+// AddTarget adds one or more targets to the logger which will receive
+// log records for outputting.
+func (logr *Logr) AddTarget(targets ...Target) error {
+ if logr.IsShutdown() {
+ return fmt.Errorf("AddTarget called after Logr shut down")
+ }
+
+ logr.ensureInit()
+ metrics := logr.getMetricsCollector()
+ defer logr.ResetLevelCache() // call this after tmux is released
+
+ logr.tmux.Lock()
+ defer logr.tmux.Unlock()
+
+ errs := merror.New()
+ for _, t := range targets {
+ if t == nil {
+ continue
+ }
+
+ logr.targets = append(logr.targets, t)
+ if metrics != nil {
+ if tm, ok := t.(TargetWithMetrics); ok {
+ if err := tm.EnableMetrics(metrics, logr.MetricsUpdateFreqMillis); err != nil {
+ errs.Append(err)
+ }
+ }
+ }
+ }
+ return errs.ErrorOrNil()
+}
+
+// NewLogger creates a Logger using defaults. A `Logger` is light-weight
+// enough to create on-demand, but typically one or more Loggers are
+// created and re-used.
+func (logr *Logr) NewLogger() Logger {
+ logger := Logger{logr: logr}
+ return logger
+}
+
+var levelStatusDisabled = LevelStatus{}
+
+// IsLevelEnabled returns true if at least one target has the specified
+// level enabled. The result is cached so that subsequent checks are fast.
+func (logr *Logr) IsLevelEnabled(lvl Level) LevelStatus {
+ status, ok := logr.isLevelEnabledFromCache(lvl)
+ if ok {
+ return status
+ }
+
+ // Check each target.
+ logr.tmux.RLock()
+ for _, t := range logr.targets {
+ e, s := t.IsLevelEnabled(lvl)
+ if e {
+ status.Enabled = true
+ if s {
+ status.Stacktrace = true
+ break // if both enabled then no sense checking more targets
+ }
+ }
+ }
+ logr.tmux.RUnlock()
+
+ // Cache and return the result.
+ if err := logr.updateLevelCache(lvl.ID, status); err != nil {
+ logr.ReportError(err)
+ return LevelStatus{}
+ }
+ return status
+}
+
+func (logr *Logr) isLevelEnabledFromCache(lvl Level) (LevelStatus, bool) {
+ logr.mux.RLock()
+ defer logr.mux.RUnlock()
+
+ // Don't accept new log records after shutdown.
+ if logr.shutdown {
+ return levelStatusDisabled, true
+ }
+
+ // Check cache. lvlCache may still be nil if no targets added.
+ if logr.lvlCache == nil {
+ return levelStatusDisabled, true
+ }
+ status, ok := logr.lvlCache.get(lvl.ID)
+ if ok {
+ return status, true
+ }
+ return LevelStatus{}, false
+}
+
+func (logr *Logr) updateLevelCache(id LevelID, status LevelStatus) error {
+ logr.mux.RLock()
+ defer logr.mux.RUnlock()
+ if logr.lvlCache != nil {
+ return logr.lvlCache.put(id, status)
+ }
+ return nil
+}
+
+// HasTargets returns true only if at least one target exists within the Logr.
+func (logr *Logr) HasTargets() bool {
+ logr.tmux.RLock()
+ defer logr.tmux.RUnlock()
+ return len(logr.targets) > 0
+}
+
+// TargetInfo provides name and type for a Target.
+type TargetInfo struct {
+ Name string
+ Type string
+}
+
+// TargetInfos enumerates all the targets added to this Logr.
+// The resulting slice represents a snapshot at time of calling.
+func (logr *Logr) TargetInfos() []TargetInfo {
+ logr.tmux.RLock()
+ defer logr.tmux.RUnlock()
+
+ infos := make([]TargetInfo, 0)
+
+ for _, t := range logr.targets {
+ inf := TargetInfo{
+ Name: fmt.Sprintf("%v", t),
+ Type: fmt.Sprintf("%T", t),
+ }
+ infos = append(infos, inf)
+ }
+ return infos
+}
+
+// RemoveTargets safely removes one or more targets based on the filtering method.
+// f should return true to delete the target, false to keep it.
+// When removing a target, best effort is made to write any queued log records before
+// closing, with cxt determining how much time can be spent in total.
+// Note, keep the timeout short since this method blocks certain logging operations.
+func (logr *Logr) RemoveTargets(cxt context.Context, f func(ti TargetInfo) bool) error {
+ var removed bool
+ defer func() {
+ if removed {
+ // call this after tmux is released since
+ // it will lock mux and we don't want to
+ // introduce possible deadlock.
+ logr.ResetLevelCache()
+ }
+ }()
+
+ errs := merror.New()
+
+ logr.tmux.Lock()
+ defer logr.tmux.Unlock()
+
+ cp := make([]Target, 0)
+
+ for _, t := range logr.targets {
+ inf := TargetInfo{
+ Name: fmt.Sprintf("%v", t),
+ Type: fmt.Sprintf("%T", t),
+ }
+ if f(inf) {
+ if err := t.Shutdown(cxt); err != nil {
+ errs.Append(err)
+ }
+ removed = true
+ } else {
+ cp = append(cp, t)
+ }
+ }
+ logr.targets = cp
+ return errs.ErrorOrNil()
+}
+
+// ResetLevelCache resets the cached results of `IsLevelEnabled`. This is
+// called any time a Target is added or a target's level is changed.
+func (logr *Logr) ResetLevelCache() {
+ // Write lock so that new cache entries cannot be stored while we
+ // clear the cache.
+ logr.mux.Lock()
+ defer logr.mux.Unlock()
+ logr.resetLevelCache()
+}
+
+// resetLevelCache empties the level cache without locking.
+// mux.Lock must be held before calling this function.
+func (logr *Logr) resetLevelCache() {
+ // lvlCache may still be nil if no targets added.
+ if logr.lvlCache != nil {
+ logr.lvlCache.clear()
+ }
+}
+
+// enqueue adds a log record to the logr queue. If the queue is full then
+// this function either blocks or the log record is dropped, depending on
+// the result of calling `OnQueueFull`.
+func (logr *Logr) enqueue(rec *LogRec) {
+ if logr.in == nil {
+ logr.ReportError(fmt.Errorf("AddTarget or Configure must be called before enqueue"))
+ }
+
+ select {
+ case logr.in <- rec:
+ default:
+ if logr.OnQueueFull != nil && logr.OnQueueFull(rec, logr.maxQueueSizeActual) {
+ return // drop the record
+ }
+ select {
+ case <-time.After(logr.enqueueTimeout()):
+ logr.ReportError(fmt.Errorf("enqueue timed out for log rec [%v]", rec))
+ case logr.in <- rec: // block until success or timeout
+ }
+ }
+}
+
+// exit is called by one of the FatalXXX style APIs. If `logr.OnExit` is not nil
+// then that method is called, otherwise the default behavior is to shut down this
+// Logr cleanly then call `os.Exit(code)`.
+func (logr *Logr) exit(code int) {
+ if logr.OnExit != nil {
+ logr.OnExit(code)
+ return
+ }
+
+ if err := logr.Shutdown(); err != nil {
+ logr.ReportError(err)
+ }
+ os.Exit(code)
+}
+
+// panic is called by one of the PanicXXX style APIs. If `logr.OnPanic` is not nil
+// then that method is called, otherwise the default behavior is to shut down this
+// Logr cleanly then call `panic(err)`.
+func (logr *Logr) panic(err interface{}) {
+ if logr.OnPanic != nil {
+ logr.OnPanic(err)
+ return
+ }
+
+ if err := logr.Shutdown(); err != nil {
+ logr.ReportError(err)
+ }
+ panic(err)
+}
+
+// Flush blocks while flushing the logr queue and all target queues, by
+// writing existing log records to valid targets.
+// Any attempts to add new log records will block until flush is complete.
+// `logr.FlushTimeout` determines how long flush can execute before
+// timing out. Use `IsTimeoutError` to determine if the returned error is
+// due to a timeout.
+func (logr *Logr) Flush() error {
+ ctx, cancel := context.WithTimeout(context.Background(), logr.flushTimeout())
+ defer cancel()
+ return logr.FlushWithTimeout(ctx)
+}
+
+// FlushWithTimeout blocks while flushing the logr queue and all target queues, by
+// writing existing log records to valid targets.
+// Any attempts to add new log records will block until flush is complete.
+// Use `IsTimeoutError` to determine if the returned error is
+// due to a timeout.
+func (logr *Logr) FlushWithTimeout(ctx context.Context) error {
+ if !logr.HasTargets() {
+ return nil
+ }
+
+ if logr.IsShutdown() {
+ return errors.New("Flush called on shut down Logr")
+ }
+
+ rec := newFlushLogRec(logr.NewLogger())
+ logr.enqueue(rec)
+
+ select {
+ case <-ctx.Done():
+ return newTimeoutError("logr queue shutdown timeout")
+ case <-rec.flush:
+ }
+ return nil
+}
+
+// IsShutdown returns true if this Logr instance has been shut down.
+// No further log records can be enqueued and no targets added after
+// shutdown.
+func (logr *Logr) IsShutdown() bool {
+ logr.mux.Lock()
+ defer logr.mux.Unlock()
+ return logr.shutdown
+}
+
+// Shutdown cleanly stops the logging engine after making best efforts
+// to flush all targets. Call this function right before application
+// exit - logr cannot be restarted once shut down.
+// `logr.ShutdownTimeout` determines how long shutdown can execute before
+// timing out. Use `IsTimeoutError` to determine if the returned error is
+// due to a timeout.
+func (logr *Logr) Shutdown() error {
+ ctx, cancel := context.WithTimeout(context.Background(), logr.shutdownTimeout())
+ defer cancel()
+ return logr.ShutdownWithTimeout(ctx)
+}
+
+// ShutdownWithTimeout cleanly stops the logging engine after making best efforts
+// to flush all targets. Call this function right before application
+// exit - logr cannot be restarted once shut down.
+// Use `IsTimeoutError` to determine if the returned error is due to a
+// timeout.
+func (logr *Logr) ShutdownWithTimeout(ctx context.Context) error {
+ logr.mux.Lock()
+ if logr.shutdown {
+ logr.mux.Unlock()
+ return errors.New("Shutdown called again after shut down")
+ }
+ logr.shutdown = true
+ logr.resetLevelCache()
+ logr.mux.Unlock()
+
+ logr.metricsCloseOnce.Do(func() {
+ if logr.metricsDone != nil {
+ close(logr.metricsDone)
+ }
+ })
+
+ errs := merror.New()
+
+ // close the incoming channel and wait for read loop to exit.
+ if logr.in != nil {
+ close(logr.in)
+ select {
+ case <-ctx.Done():
+ errs.Append(newTimeoutError("logr queue shutdown timeout"))
+ case <-logr.done:
+ }
+ }
+
+ // logr.in channel should now be drained to targets and no more log records
+ // can be added.
+ logr.tmux.RLock()
+ defer logr.tmux.RUnlock()
+ for _, t := range logr.targets {
+ err := t.Shutdown(ctx)
+ if err != nil {
+ errs.Append(err)
+ }
+ }
+ return errs.ErrorOrNil()
+}
+
+// ReportError is used to notify the host application of any internal logging errors.
+// If `OnLoggerError` is not nil, it is called with the error, otherwise the error is
+// output to `os.Stderr`.
+func (logr *Logr) ReportError(err interface{}) {
+ logr.incErrorCounter()
+
+ if logr.OnLoggerError == nil {
+ fmt.Fprintln(os.Stderr, err)
+ return
+ }
+ logr.OnLoggerError(fmt.Errorf("%v", err))
+}
+
+// BorrowBuffer borrows a buffer from the pool. Release the buffer to reduce garbage collection.
+func (logr *Logr) BorrowBuffer() *bytes.Buffer {
+ if logr.DisableBufferPool {
+ return &bytes.Buffer{}
+ }
+ return logr.bufferPool.Get().(*bytes.Buffer)
+}
+
+// ReleaseBuffer returns a buffer to the pool to reduce garbage collection. The buffer is only
+// retained if its capacity is less than MaxPooledBuffer.
+func (logr *Logr) ReleaseBuffer(buf *bytes.Buffer) {
+ if !logr.DisableBufferPool && buf.Cap() < logr.MaxPooledBuffer {
+ buf.Reset()
+ logr.bufferPool.Put(buf)
+ }
+}
+
+// enqueueTimeout returns the amount of time a log record can take to be queued.
+// This only applies to a blocking enqueue, which happens after `logr.OnQueueFull` is called
+// and returns false.
+func (logr *Logr) enqueueTimeout() time.Duration {
+ if logr.EnqueueTimeout == 0 {
+ return DefaultEnqueueTimeout
+ }
+ return logr.EnqueueTimeout
+}
+
+// shutdownTimeout returns the timeout duration for `logr.Shutdown`.
+func (logr *Logr) shutdownTimeout() time.Duration {
+ if logr.ShutdownTimeout == 0 {
+ return DefaultShutdownTimeout
+ }
+ return logr.ShutdownTimeout
+}
+
+// flushTimeout returns the timeout duration for `logr.Flush`.
+func (logr *Logr) flushTimeout() time.Duration {
+ if logr.FlushTimeout == 0 {
+ return DefaultFlushTimeout
+ }
+ return logr.FlushTimeout
+}
+
+// start reads incoming log records until the in channel is closed.
+// Incoming log records are fanned out to all log targets.
+func (logr *Logr) start() {
+ defer func() {
+ if r := recover(); r != nil {
+ logr.ReportError(r)
+ go logr.start()
+ }
+ }()
+
+ for rec := range logr.in {
+ if rec.flush != nil {
+ logr.flush(rec.flush)
+ } else {
+ rec.prep()
+ logr.fanout(rec)
+ }
+ }
+ close(logr.done)
+}
+
+// startMetricsUpdater updates the metrics for any polled values every `MetricsUpdateFreqMillis`
+// milliseconds until logr is closed.
+func (logr *Logr) startMetricsUpdater() {
+ for {
+ updateFreq := logr.getMetricsUpdateFreqMillis()
+ if updateFreq == 0 {
+ updateFreq = DefMetricsUpdateFreqMillis
+ }
+ if updateFreq < 250 {
+ updateFreq = 250 // don't peg the CPU
+ }
+
+ select {
+ case <-logr.metricsDone:
+ return
+ case <-time.After(time.Duration(updateFreq) * time.Millisecond):
+ logr.setQueueSizeGauge(float64(len(logr.in)))
+ }
+ }
+}
+
+func (logr *Logr) getMetricsUpdateFreqMillis() int64 {
+ logr.mux.RLock()
+ defer logr.mux.RUnlock()
+ return logr.MetricsUpdateFreqMillis
+}
+
+// fanout pushes a LogRec to all targets.
+func (logr *Logr) fanout(rec *LogRec) {
+ var target Target
+ defer func() {
+ if r := recover(); r != nil {
+ logr.ReportError(fmt.Errorf("fanout failed for target %s, %v", target, r))
+ }
+ }()
+
+ var logged bool
+ defer func() {
+ if logged {
+ logr.incLoggedCounter() // call this after tmux is released
+ }
+ }()
+
+ logr.tmux.RLock()
+ defer logr.tmux.RUnlock()
+ for _, target = range logr.targets {
+ if enabled, _ := target.IsLevelEnabled(rec.Level()); enabled {
+ target.Log(rec)
+ logged = true
+ }
+ }
+}
+
+// flush drains the queue and notifies when done.
+func (logr *Logr) flush(done chan<- struct{}) {
+ // first drain the logr queue.
+loop:
+ for {
+ var rec *LogRec
+ select {
+ case rec = <-logr.in:
+ if rec.flush == nil {
+ rec.prep()
+ logr.fanout(rec)
+ }
+ default:
+ break loop
+ }
+ }
+
+ logger := logr.NewLogger()
+
+ // drain all the targets; block until finished.
+ logr.tmux.RLock()
+ defer logr.tmux.RUnlock()
+ for _, target := range logr.targets {
+ rec := newFlushLogRec(logger)
+ target.Log(rec)
+ <-rec.flush
+ }
+ done <- struct{}{}
+}
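Pulling the pieces together, a sketch of configuring a Logr instance, including the queue-full and error callbacks described above (the handler bodies are illustrative only):

package main

import (
    "fmt"
    "os"
    "time"

    "github.com/mattermost/logr"
    "github.com/mattermost/logr/target"
)

func main() {
    lgr := &logr.Logr{
        MaxQueueSize:    1000,
        EnqueueTimeout:  time.Second,
        ShutdownTimeout: 5 * time.Second,
        OnLoggerError: func(err error) {
            fmt.Fprintln(os.Stderr, "logging error:", err)
        },
        // Drop records rather than block when the main queue is full.
        OnQueueFull: func(rec *logr.LogRec, maxQueueSize int) bool {
            return true
        },
    }

    filter := logr.StdFilter{Lvl: logr.Info, Stacktrace: logr.Error}
    t := target.NewWriterTarget(filter, &logr.DefaultFormatter{}, os.Stdout, 1000)
    if err := lgr.AddTarget(t); err != nil {
        panic(err)
    }

    lgr.NewLogger().Info("service started")

    // Flush pending records, then shut down cleanly before exit.
    _ = lgr.Flush()
    if err := lgr.Shutdown(); err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
}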
diff --git a/vendor/github.com/mattermost/logr/logrec.go b/vendor/github.com/mattermost/logr/logrec.go
new file mode 100644
index 00000000..9428aaec
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/logrec.go
@@ -0,0 +1,189 @@
+package logr
+
+import (
+ "fmt"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+)
+
+var (
+ logrPkg string
+)
+
+func init() {
+ // Calc current package name
+ pcs := make([]uintptr, 2)
+ _ = runtime.Callers(0, pcs)
+ tmp := runtime.FuncForPC(pcs[1]).Name()
+ logrPkg = getPackageName(tmp)
+}
+
+// LogRec collects raw, unformatted data to be logged.
+// TODO: pool these? how to reliably know when targets are done with them? Copy for each target?
+type LogRec struct {
+ mux sync.RWMutex
+ time time.Time
+
+ level Level
+ logger Logger
+
+ template string
+ newline bool
+ args []interface{}
+
+ stackPC []uintptr
+ stackCount int
+
+ // flushes Logr and target queues when not nil.
+ flush chan struct{}
+
+ // remaining fields calculated by `prep`
+ msg string
+ frames []runtime.Frame
+}
+
+// NewLogRec creates a new LogRec with the current time and optional stack trace.
+func NewLogRec(lvl Level, logger Logger, template string, args []interface{}, incStacktrace bool) *LogRec {
+ rec := &LogRec{time: time.Now(), logger: logger, level: lvl, template: template, args: args}
+ if incStacktrace {
+ rec.stackPC = make([]uintptr, DefaultMaxStackFrames)
+ rec.stackCount = runtime.Callers(2, rec.stackPC)
+ }
+ return rec
+}
+
+// newFlushLogRec creates a LogRec that flushes the Logr queue and
+// any target queues that support flushing.
+func newFlushLogRec(logger Logger) *LogRec {
+ return &LogRec{logger: logger, flush: make(chan struct{})}
+}
+
+// prep resolves all args and field values to strings, and
+// resolves stack trace to frames.
+func (rec *LogRec) prep() {
+ rec.mux.Lock()
+ defer rec.mux.Unlock()
+
+ // resolve args
+ if rec.template == "" {
+ if rec.newline {
+ rec.msg = fmt.Sprintln(rec.args...)
+ } else {
+ rec.msg = fmt.Sprint(rec.args...)
+ }
+ } else {
+ rec.msg = fmt.Sprintf(rec.template, rec.args...)
+ }
+
+ // resolve stack trace
+ if rec.stackCount > 0 {
+ frames := runtime.CallersFrames(rec.stackPC[:rec.stackCount])
+ for {
+ f, more := frames.Next()
+ rec.frames = append(rec.frames, f)
+ if !more {
+ break
+ }
+ }
+
+ // remove leading logr package entries.
+ var start int
+ for i, frame := range rec.frames {
+ pkg := getPackageName(frame.Function)
+ if pkg != "" && pkg != logrPkg {
+ start = i
+ break
+ }
+ }
+ rec.frames = rec.frames[start:]
+ }
+}
+
+// WithTime returns a shallow copy of the log record while replacing
+// the time. This can be used by targets and formatters to adjust
+// the time, or take ownership of the log record.
+func (rec *LogRec) WithTime(time time.Time) *LogRec {
+ rec.mux.RLock()
+ defer rec.mux.RUnlock()
+
+ return &LogRec{
+ time: time,
+ level: rec.level,
+ logger: rec.logger,
+ template: rec.template,
+ newline: rec.newline,
+ args: rec.args,
+ msg: rec.msg,
+ stackPC: rec.stackPC,
+ stackCount: rec.stackCount,
+ frames: rec.frames,
+ }
+}
+
+// Logger returns the `Logger` that created this `LogRec`.
+func (rec *LogRec) Logger() Logger {
+ return rec.logger
+}
+
+// Time returns this log record's time stamp.
+func (rec *LogRec) Time() time.Time {
+ // no locking needed as this field is not mutated.
+ return rec.time
+}
+
+// Level returns this log record's Level.
+func (rec *LogRec) Level() Level {
+ // no locking needed as this field is not mutated.
+ return rec.level
+}
+
+// Fields returns this log record's Fields.
+func (rec *LogRec) Fields() Fields {
+ // no locking needed as this field is not mutated.
+ return rec.logger.fields
+}
+
+// Msg returns this log record's message text.
+func (rec *LogRec) Msg() string {
+ rec.mux.RLock()
+ defer rec.mux.RUnlock()
+ return rec.msg
+}
+
+// StackFrames returns this log record's stack frames or
+// nil if no stack trace was required.
+func (rec *LogRec) StackFrames() []runtime.Frame {
+ rec.mux.RLock()
+ defer rec.mux.RUnlock()
+ return rec.frames
+}
+
+// String returns a string representation of this log record.
+func (rec *LogRec) String() string {
+ if rec.flush != nil {
+ return "[flusher]"
+ }
+
+ f := &DefaultFormatter{}
+ buf := rec.logger.logr.BorrowBuffer()
+ defer rec.logger.logr.ReleaseBuffer(buf)
+ buf, _ = f.Format(rec, true, buf)
+ return strings.TrimSpace(buf.String())
+}
+
+// getPackageName reduces a fully qualified function name to the package name.
+// Adapted from sirupsen/logrus: https://github.com/sirupsen/logrus/blob/master/entry.go
+func getPackageName(f string) string {
+ for {
+ lastPeriod := strings.LastIndex(f, ".")
+ lastSlash := strings.LastIndex(f, "/")
+ if lastPeriod > lastSlash {
+ f = f[:lastPeriod]
+ } else {
+ break
+ }
+ }
+ return f
+}
diff --git a/vendor/github.com/mattermost/logr/metrics.go b/vendor/github.com/mattermost/logr/metrics.go
new file mode 100644
index 00000000..24fe22b6
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/metrics.go
@@ -0,0 +1,117 @@
+package logr
+
+import (
+ "errors"
+
+ "github.com/wiggin77/merror"
+)
+
+const (
+ DefMetricsUpdateFreqMillis = 15000 // 15 seconds
+)
+
+// Counter is a simple metrics sink that can only increment a value.
+// Implementations are external to Logr and provided via `MetricsCollector`.
+type Counter interface {
+ // Inc increments the counter by 1. Use Add to increment it by arbitrary non-negative values.
+ Inc()
+ // Add adds the given value to the counter. It panics if the value is < 0.
+ Add(float64)
+}
+
+// Gauge is a simple metrics sink that can receive values and increase or decrease.
+// Implementations are external to Logr and provided via `MetricsCollector`.
+type Gauge interface {
+ // Set sets the Gauge to an arbitrary value.
+ Set(float64)
+ // Add adds the given value to the Gauge. (The value can be negative, resulting in a decrease of the Gauge.)
+ Add(float64)
+ // Sub subtracts the given value from the Gauge. (The value can be negative, resulting in an increase of the Gauge.)
+ Sub(float64)
+}
+
+// MetricsCollector provides a way for users of this Logr package to have metrics pushed
+// in an efficient way to any backend, e.g. Prometheus.
+// For each target added to Logr, the supplied MetricsCollector will provide a Gauge
+// and Counters that will be called frequently as logging occurs.
+type MetricsCollector interface {
+ // QueueSizeGauge returns a Gauge that will be updated by the named target.
+ QueueSizeGauge(target string) (Gauge, error)
+ // LoggedCounter returns a Counter that will be incremented by the named target.
+ LoggedCounter(target string) (Counter, error)
+ // ErrorCounter returns a Counter that will be incremented by the named target.
+ ErrorCounter(target string) (Counter, error)
+ // DroppedCounter returns a Counter that will be incremented by the named target.
+ DroppedCounter(target string) (Counter, error)
+ // BlockedCounter returns a Counter that will be incremented by the named target.
+ BlockedCounter(target string) (Counter, error)
+}
+
+// TargetWithMetrics is a target that provides metrics.
+type TargetWithMetrics interface {
+ EnableMetrics(collector MetricsCollector, updateFreqMillis int64) error
+}
+
+func (logr *Logr) getMetricsCollector() MetricsCollector {
+ logr.mux.RLock()
+ defer logr.mux.RUnlock()
+ return logr.metrics
+}
+
+// SetMetricsCollector enables metrics collection by supplying a MetricsCollector.
+// The MetricsCollector provides counters and gauges that are updated by log targets.
+func (logr *Logr) SetMetricsCollector(collector MetricsCollector) error {
+ if collector == nil {
+ return errors.New("collector cannot be nil")
+ }
+
+ logr.mux.Lock()
+ logr.metrics = collector
+ logr.queueSizeGauge, _ = collector.QueueSizeGauge("_logr")
+ logr.loggedCounter, _ = collector.LoggedCounter("_logr")
+ logr.errorCounter, _ = collector.ErrorCounter("_logr")
+ logr.mux.Unlock()
+
+ logr.metricsInitOnce.Do(func() {
+ logr.metricsDone = make(chan struct{})
+ go logr.startMetricsUpdater()
+ })
+
+ merr := merror.New()
+
+ logr.tmux.RLock()
+ defer logr.tmux.RUnlock()
+ for _, target := range logr.targets {
+ if tm, ok := target.(TargetWithMetrics); ok {
+ if err := tm.EnableMetrics(collector, logr.MetricsUpdateFreqMillis); err != nil {
+ merr.Append(err)
+ }
+ }
+
+ }
+ return merr.ErrorOrNil()
+}
+
+func (logr *Logr) setQueueSizeGauge(val float64) {
+ logr.mux.RLock()
+ defer logr.mux.RUnlock()
+ if logr.queueSizeGauge != nil {
+ logr.queueSizeGauge.Set(val)
+ }
+}
+
+func (logr *Logr) incLoggedCounter() {
+ logr.mux.RLock()
+ defer logr.mux.RUnlock()
+ if logr.loggedCounter != nil {
+ logr.loggedCounter.Inc()
+ }
+}
+
+func (logr *Logr) incErrorCounter() {
+ logr.mux.RLock()
+ defer logr.mux.RUnlock()
+ if logr.errorCounter != nil {
+ logr.errorCounter.Inc()
+ }
+}
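A sketch of a trivial in-memory MetricsCollector; the types here are hypothetical stand-ins for whatever metrics library (e.g. a Prometheus client) the host application uses:

package main

import (
    "sync"
    "sync/atomic"

    "github.com/mattermost/logr"
)

type memCounter struct{ v uint64 }

func (c *memCounter) Inc()          { atomic.AddUint64(&c.v, 1) }
func (c *memCounter) Add(f float64) { atomic.AddUint64(&c.v, uint64(f)) }

type memGauge struct {
    mux sync.Mutex
    v   float64
}

func (g *memGauge) Set(f float64) { g.mux.Lock(); g.v = f; g.mux.Unlock() }
func (g *memGauge) Add(f float64) { g.mux.Lock(); g.v += f; g.mux.Unlock() }
func (g *memGauge) Sub(f float64) { g.mux.Lock(); g.v -= f; g.mux.Unlock() }

// memCollector hands out one gauge/counter per target name.
type memCollector struct{}

func (memCollector) QueueSizeGauge(target string) (logr.Gauge, error)   { return &memGauge{}, nil }
func (memCollector) LoggedCounter(target string) (logr.Counter, error)  { return &memCounter{}, nil }
func (memCollector) ErrorCounter(target string) (logr.Counter, error)   { return &memCounter{}, nil }
func (memCollector) DroppedCounter(target string) (logr.Counter, error) { return &memCounter{}, nil }
func (memCollector) BlockedCounter(target string) (logr.Counter, error) { return &memCounter{}, nil }

func main() {
    lgr := &logr.Logr{}
    if err := lgr.SetMetricsCollector(memCollector{}); err != nil {
        panic(err)
    }
    // Add targets and log as usual; the gauges and counters are updated as
    // records flow through the Logr and its targets.
}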
diff --git a/vendor/github.com/mattermost/logr/target.go b/vendor/github.com/mattermost/logr/target.go
new file mode 100644
index 00000000..f8e7bf75
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/target.go
@@ -0,0 +1,299 @@
+package logr
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "sync"
+ "time"
+)
+
+// Target represents a destination for log records such as file,
+// database, TCP socket, etc.
+type Target interface {
+ // SetName provides an optional name for the target.
+ SetName(name string)
+
+ // IsLevelEnabled returns true if this target should emit
+ // logs for the specified level. Also determines if
+ // a stack trace is required.
+ IsLevelEnabled(Level) (enabled bool, stacktrace bool)
+
+ // Formatter returns the Formatter associated with this Target.
+ Formatter() Formatter
+
+ // Log outputs the log record to this target's destination.
+ Log(rec *LogRec)
+
+ // Shutdown makes best effort to flush target queue and
+ // frees/closes all resources.
+ Shutdown(ctx context.Context) error
+}
+
+// RecordWriter can convert a LogRecord to bytes and output to some data sink.
+type RecordWriter interface {
+ Write(rec *LogRec) error
+}
+
+// Basic provides the basic functionality of a Target that can be used
+// to more easily compose your own Targets. To use, just embed Basic
+// in your target type, implement `RecordWriter`, and call `(*Basic).Start`.
+type Basic struct {
+ target Target
+
+ filter Filter
+ formatter Formatter
+
+ in chan *LogRec
+ done chan struct{}
+ w RecordWriter
+
+ mux sync.RWMutex
+ name string
+
+ metrics bool
+ queueSizeGauge Gauge
+ loggedCounter Counter
+ errorCounter Counter
+ droppedCounter Counter
+ blockedCounter Counter
+
+ metricsUpdateFreqMillis int64
+}
+
+// Start initializes this target helper and starts accepting log records for processing.
+func (b *Basic) Start(target Target, rw RecordWriter, filter Filter, formatter Formatter, maxQueued int) {
+ if filter == nil {
+ filter = &StdFilter{Lvl: Fatal}
+ }
+ if formatter == nil {
+ formatter = &DefaultFormatter{}
+ }
+
+ b.target = target
+ b.filter = filter
+ b.formatter = formatter
+ b.in = make(chan *LogRec, maxQueued)
+ b.done = make(chan struct{}, 1)
+ b.w = rw
+ go b.start()
+
+ if b.hasMetrics() {
+ go b.startMetricsUpdater()
+ }
+}
+
+func (b *Basic) SetName(name string) {
+ b.mux.Lock()
+ defer b.mux.Unlock()
+ b.name = name
+}
+
+// IsLevelEnabled returns true if this target should emit
+// logs for the specified level. Also determines if
+// a stack trace is required.
+func (b *Basic) IsLevelEnabled(lvl Level) (enabled bool, stacktrace bool) {
+ return b.filter.IsEnabled(lvl), b.filter.IsStacktraceEnabled(lvl)
+}
+
+// Formatter returns the Formatter associated with this Target.
+func (b *Basic) Formatter() Formatter {
+ return b.formatter
+}
+
+// Shutdown stops processing log records after making best
+// effort to flush queue.
+func (b *Basic) Shutdown(ctx context.Context) error {
+ // close the incoming channel and wait for read loop to exit.
+ close(b.in)
+ select {
+ case <-ctx.Done():
+ case <-b.done:
+ }
+
+ // b.in channel should now be drained.
+ return nil
+}
+
+// Log outputs the log record to this target's destination.
+func (b *Basic) Log(rec *LogRec) {
+ lgr := rec.Logger().Logr()
+ select {
+ case b.in <- rec:
+ default:
+ handler := lgr.OnTargetQueueFull
+ if handler != nil && handler(b.target, rec, cap(b.in)) {
+ b.incDroppedCounter()
+ return // drop the record
+ }
+ b.incBlockedCounter()
+
+ select {
+ case <-time.After(lgr.enqueueTimeout()):
+ lgr.ReportError(fmt.Errorf("target enqueue timeout for log rec [%v]", rec))
+ case b.in <- rec: // block until success or timeout
+ }
+ }
+}
+
+// EnableMetrics enables metrics collection using the provided MetricsCollector.
+func (b *Basic) EnableMetrics(collector MetricsCollector, updateFreqMillis int64) error {
+ name := fmt.Sprintf("%v", b)
+
+ b.mux.Lock()
+ defer b.mux.Unlock()
+
+ b.metrics = true
+ b.metricsUpdateFreqMillis = updateFreqMillis
+
+ var err error
+
+ if b.queueSizeGauge, err = collector.QueueSizeGauge(name); err != nil {
+ return err
+ }
+ if b.loggedCounter, err = collector.LoggedCounter(name); err != nil {
+ return err
+ }
+ if b.errorCounter, err = collector.ErrorCounter(name); err != nil {
+ return err
+ }
+ if b.droppedCounter, err = collector.DroppedCounter(name); err != nil {
+ return err
+ }
+ if b.blockedCounter, err = collector.BlockedCounter(name); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (b *Basic) hasMetrics() bool {
+ b.mux.RLock()
+ defer b.mux.RUnlock()
+ return b.metrics
+}
+
+func (b *Basic) setQueueSizeGauge(val float64) {
+ b.mux.RLock()
+ defer b.mux.RUnlock()
+ if b.queueSizeGauge != nil {
+ b.queueSizeGauge.Set(val)
+ }
+}
+
+func (b *Basic) incLoggedCounter() {
+ b.mux.RLock()
+ defer b.mux.RUnlock()
+ if b.loggedCounter != nil {
+ b.loggedCounter.Inc()
+ }
+}
+
+func (b *Basic) incErrorCounter() {
+ b.mux.RLock()
+ defer b.mux.RUnlock()
+ if b.errorCounter != nil {
+ b.errorCounter.Inc()
+ }
+}
+
+func (b *Basic) incDroppedCounter() {
+ b.mux.RLock()
+ defer b.mux.RUnlock()
+ if b.droppedCounter != nil {
+ b.droppedCounter.Inc()
+ }
+}
+
+func (b *Basic) incBlockedCounter() {
+ b.mux.RLock()
+ defer b.mux.RUnlock()
+ if b.blockedCounter != nil {
+ b.blockedCounter.Inc()
+ }
+}
+
+// String returns a name for this target. Use `SetName` to specify a name.
+func (b *Basic) String() string {
+ b.mux.RLock()
+ defer b.mux.RUnlock()
+
+ if b.name != "" {
+ return b.name
+ }
+ return fmt.Sprintf("%T", b.target)
+}
+
+// start accepts log records via the in channel and writes them to the
+// supplied RecordWriter until the channel is closed.
+func (b *Basic) start() {
+ defer func() {
+ if r := recover(); r != nil {
+ fmt.Fprintln(os.Stderr, "Basic.start -- ", r)
+ go b.start()
+ }
+ }()
+
+ for rec := range b.in {
+ if rec.flush != nil {
+ b.flush(rec.flush)
+ } else {
+ err := b.w.Write(rec)
+ if err != nil {
+ b.incErrorCounter()
+ rec.Logger().Logr().ReportError(err)
+ } else {
+ b.incLoggedCounter()
+ }
+ }
+ }
+ close(b.done)
+}
+
+// startMetricsUpdater updates the metrics for any polled values every `MetricsUpdateFreqMillis`
+// milliseconds until the target is closed.
+func (b *Basic) startMetricsUpdater() {
+ for {
+ updateFreq := b.getMetricsUpdateFreqMillis()
+ if updateFreq == 0 {
+ updateFreq = DefMetricsUpdateFreqMillis
+ }
+ if updateFreq < 250 {
+ updateFreq = 250 // don't peg the CPU
+ }
+
+ select {
+ case <-b.done:
+ return
+ case <-time.After(time.Duration(updateFreq) * time.Millisecond):
+ b.setQueueSizeGauge(float64(len(b.in)))
+ }
+ }
+}
+
+func (b *Basic) getMetricsUpdateFreqMillis() int64 {
+ b.mux.RLock()
+ defer b.mux.RUnlock()
+ return b.metricsUpdateFreqMillis
+}
+
+// flush drains the queue and notifies when done.
+func (b *Basic) flush(done chan<- struct{}) {
+ for {
+ var rec *LogRec
+ var err error
+ select {
+ case rec = <-b.in:
+ // ignore any redundant flush records.
+ if rec.flush == nil {
+ err = b.w.Write(rec)
+ if err != nil {
+ b.incErrorCounter()
+ rec.Logger().Logr().ReportError(err)
+ }
+ }
+ default:
+ done <- struct{}{}
+ return
+ }
+ }
+}
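As the Basic doc comment suggests, a custom target only needs to embed Basic and implement RecordWriter. A minimal sketch (the StdoutTarget type is hypothetical):

package main

import (
    "os"

    "github.com/mattermost/logr"
)

// StdoutTarget embeds logr.Basic and implements logr.RecordWriter.
type StdoutTarget struct {
    logr.Basic
}

func NewStdoutTarget(filter logr.Filter, formatter logr.Formatter, maxQueue int) *StdoutTarget {
    t := &StdoutTarget{}
    t.Basic.Start(t, t, filter, formatter, maxQueue)
    return t
}

// Write converts the record to bytes via the Formatter and writes to stdout.
func (t *StdoutTarget) Write(rec *logr.LogRec) error {
    _, stacktrace := t.IsLevelEnabled(rec.Level())

    buf := rec.Logger().Logr().BorrowBuffer()
    defer rec.Logger().Logr().ReleaseBuffer(buf)

    buf, err := t.Formatter().Format(rec, stacktrace, buf)
    if err != nil {
        return err
    }
    _, err = os.Stdout.Write(buf.Bytes())
    return err
}

func main() {
    lgr := &logr.Logr{}
    _ = lgr.AddTarget(NewStdoutTarget(logr.StdFilter{Lvl: logr.Info}, &logr.DefaultFormatter{}, 100))
    lgr.NewLogger().Info("hello from a custom target")
    _ = lgr.Shutdown()
}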
diff --git a/vendor/github.com/mattermost/logr/target/file.go b/vendor/github.com/mattermost/logr/target/file.go
new file mode 100644
index 00000000..0fd50768
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/target/file.go
@@ -0,0 +1,87 @@
+package target
+
+import (
+ "context"
+ "io"
+
+ "github.com/mattermost/logr"
+ "github.com/wiggin77/merror"
+ "gopkg.in/natefinch/lumberjack.v2"
+)
+
+type FileOptions struct {
+ // Filename is the file to write logs to. Backup log files will be retained
+ // in the same directory. It uses <processname>-lumberjack.log in
+ // os.TempDir() if empty.
+ Filename string
+
+ // MaxSize is the maximum size in megabytes of the log file before it gets
+ // rotated. It defaults to 100 megabytes.
+ MaxSize int
+
+ // MaxAge is the maximum number of days to retain old log files based on the
+ // timestamp encoded in their filename. Note that a day is defined as 24
+ // hours and may not exactly correspond to calendar days due to daylight
+ // savings, leap seconds, etc. The default is not to remove old log files
+ // based on age.
+ MaxAge int
+
+ // MaxBackups is the maximum number of old log files to retain. The default
+ // is to retain all old log files (though MaxAge may still cause them to get
+ // deleted.)
+ MaxBackups int
+
+ // Compress determines if the rotated log files should be compressed
+ // using gzip. The default is not to perform compression.
+ Compress bool
+}
+
+// File outputs log records to a file which can be log rotated based on size or age.
+// Uses `https://github.com/natefinch/lumberjack` for rotation.
+type File struct {
+ logr.Basic
+ out io.WriteCloser
+}
+
+// NewFileTarget creates a target capable of outputting log records to a rotated file.
+func NewFileTarget(filter logr.Filter, formatter logr.Formatter, opts FileOptions, maxQueue int) *File {
+ lumber := &lumberjack.Logger{
+ Filename: opts.Filename,
+ MaxSize: opts.MaxSize,
+ MaxBackups: opts.MaxBackups,
+ MaxAge: opts.MaxAge,
+ Compress: opts.Compress,
+ }
+ f := &File{out: lumber}
+ f.Basic.Start(f, f, filter, formatter, maxQueue)
+ return f
+}
+
+// Write converts the log record to bytes, via the Formatter,
+// and outputs to a file.
+func (f *File) Write(rec *logr.LogRec) error {
+ _, stacktrace := f.IsLevelEnabled(rec.Level())
+
+ buf := rec.Logger().Logr().BorrowBuffer()
+ defer rec.Logger().Logr().ReleaseBuffer(buf)
+
+ buf, err := f.Formatter().Format(rec, stacktrace, buf)
+ if err != nil {
+ return err
+ }
+ _, err = f.out.Write(buf.Bytes())
+ return err
+}
+
+// Shutdown flushes any remaining log records and closes the file.
+func (f *File) Shutdown(ctx context.Context) error {
+ errs := merror.New()
+
+ err := f.Basic.Shutdown(ctx)
+ errs.Append(err)
+
+ err = f.out.Close()
+ errs.Append(err)
+
+ return errs.ErrorOrNil()
+}
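A usage sketch for the file target; the path and rotation values below are illustrative:

package main

import (
    "github.com/mattermost/logr"
    "github.com/mattermost/logr/target"
)

func main() {
    lgr := &logr.Logr{}

    filter := logr.StdFilter{Lvl: logr.Info, Stacktrace: logr.Error}
    opts := target.FileOptions{
        Filename:   "/var/log/myapp/app.log", // hypothetical path
        MaxSize:    50,                        // MB before rotation
        MaxAge:     28,                        // days to retain rotated files
        MaxBackups: 10,
        Compress:   true,
    }
    t := target.NewFileTarget(filter, &logr.DefaultFormatter{}, opts, 1000)
    if err := lgr.AddTarget(t); err != nil {
        panic(err)
    }

    lgr.NewLogger().Info("file target configured")
    _ = lgr.Shutdown()
}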
diff --git a/vendor/github.com/mattermost/logr/target/syslog.go b/vendor/github.com/mattermost/logr/target/syslog.go
new file mode 100644
index 00000000..1d2013b6
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/target/syslog.go
@@ -0,0 +1,89 @@
+// +build !windows,!nacl,!plan9
+
+package target
+
+import (
+ "context"
+ "fmt"
+ "log/syslog"
+
+ "github.com/mattermost/logr"
+ "github.com/wiggin77/merror"
+)
+
+// Syslog outputs log records to local or remote syslog.
+type Syslog struct {
+ logr.Basic
+ w *syslog.Writer
+}
+
+// SyslogParams provides parameters for dialing a syslog daemon.
+type SyslogParams struct {
+ Network string
+ Raddr string
+ Priority syslog.Priority
+ Tag string
+}
+
+// NewSyslogTarget creates a target capable of outputting log records to remote or local syslog.
+func NewSyslogTarget(filter logr.Filter, formatter logr.Formatter, params *SyslogParams, maxQueue int) (*Syslog, error) {
+ writer, err := syslog.Dial(params.Network, params.Raddr, params.Priority, params.Tag)
+ if err != nil {
+ return nil, err
+ }
+
+ s := &Syslog{w: writer}
+ s.Basic.Start(s, s, filter, formatter, maxQueue)
+
+ return s, nil
+}
+
+// Shutdown stops processing log records after making best
+// effort to flush queue.
+func (s *Syslog) Shutdown(ctx context.Context) error {
+ errs := merror.New()
+
+ err := s.Basic.Shutdown(ctx)
+ errs.Append(err)
+
+ err = s.w.Close()
+ errs.Append(err)
+
+ return errs.ErrorOrNil()
+}
+
+// Write converts the log record to bytes, via the Formatter,
+// and outputs to syslog.
+func (s *Syslog) Write(rec *logr.LogRec) error {
+ _, stacktrace := s.IsLevelEnabled(rec.Level())
+
+ buf := rec.Logger().Logr().BorrowBuffer()
+ defer rec.Logger().Logr().ReleaseBuffer(buf)
+
+ buf, err := s.Formatter().Format(rec, stacktrace, buf)
+ if err != nil {
+ return err
+ }
+ txt := buf.String()
+
+ switch rec.Level() {
+ case logr.Panic, logr.Fatal:
+ err = s.w.Crit(txt)
+ case logr.Error:
+ err = s.w.Err(txt)
+ case logr.Warn:
+ err = s.w.Warning(txt)
+ case logr.Debug, logr.Trace:
+ err = s.w.Debug(txt)
+ default:
+ // logr.Info plus all custom levels.
+ err = s.w.Info(txt)
+ }
+
+ if err != nil {
+ reporter := rec.Logger().Logr().ReportError
+ reporter(fmt.Errorf("syslog write fail: %w", err))
+ // syslog writer will try to reconnect.
+ }
+ return err
+}
diff --git a/vendor/github.com/mattermost/logr/target/writer.go b/vendor/github.com/mattermost/logr/target/writer.go
new file mode 100644
index 00000000..2250da51
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/target/writer.go
@@ -0,0 +1,40 @@
+package target
+
+import (
+ "io"
+ "io/ioutil"
+
+ "github.com/mattermost/logr"
+)
+
+// Writer outputs log records to any `io.Writer`.
+type Writer struct {
+ logr.Basic
+ out io.Writer
+}
+
+// NewWriterTarget creates a target capable of outputting log records to an io.Writer.
+func NewWriterTarget(filter logr.Filter, formatter logr.Formatter, out io.Writer, maxQueue int) *Writer {
+ if out == nil {
+ out = ioutil.Discard
+ }
+ w := &Writer{out: out}
+ w.Basic.Start(w, w, filter, formatter, maxQueue)
+ return w
+}
+
+// Write converts the log record to bytes, via the Formatter,
+// and outputs to the io.Writer.
+func (w *Writer) Write(rec *logr.LogRec) error {
+ _, stacktrace := w.IsLevelEnabled(rec.Level())
+
+ buf := rec.Logger().Logr().BorrowBuffer()
+ defer rec.Logger().Logr().ReleaseBuffer(buf)
+
+ buf, err := w.Formatter().Format(rec, stacktrace, buf)
+ if err != nil {
+ return err
+ }
+ _, err = w.out.Write(buf.Bytes())
+ return err
+}
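Because Writer accepts any io.Writer, it is handy in tests; a sketch that captures output into a bytes.Buffer:

package main

import (
    "bytes"
    "fmt"

    "github.com/mattermost/logr"
    "github.com/mattermost/logr/target"
)

func main() {
    lgr := &logr.Logr{}
    buf := &bytes.Buffer{}

    t := target.NewWriterTarget(logr.StdFilter{Lvl: logr.Debug}, &logr.DefaultFormatter{}, buf, 100)
    _ = lgr.AddTarget(t)

    lgr.NewLogger().Debug("captured for inspection")

    // Shutdown drains the queues, so the buffer now holds all written records.
    _ = lgr.Shutdown()
    fmt.Print(buf.String())
}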
diff --git a/vendor/github.com/mattermost/logr/timeout.go b/vendor/github.com/mattermost/logr/timeout.go
new file mode 100644
index 00000000..37737bcf
--- /dev/null
+++ b/vendor/github.com/mattermost/logr/timeout.go
@@ -0,0 +1,34 @@
+package logr
+
+import "github.com/wiggin77/merror"
+
+// timeoutError is returned from functions that can timeout.
+type timeoutError struct {
+ text string
+}
+
+// newTimeoutError returns a TimeoutError.
+func newTimeoutError(text string) timeoutError {
+ return timeoutError{text: text}
+}
+
+// IsTimeoutError returns true if err is a TimeoutError.
+func IsTimeoutError(err error) bool {
+ if _, ok := err.(timeoutError); ok {
+ return true
+ }
+ // if a multi-error, return true if any of the errors
+ // are TimeoutError
+ if merr, ok := err.(*merror.MError); ok {
+ for _, e := range merr.Errors() {
+ if IsTimeoutError(e) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func (err timeoutError) Error() string {
+ return err.text
+}
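A sketch showing how a caller can distinguish a timeout from other shutdown failures (the helper name is made up):

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/mattermost/logr"
)

// shutdownWithDeadline shuts down the Logr and reports whether it timed out.
func shutdownWithDeadline(lgr *logr.Logr, d time.Duration) {
    ctx, cancel := context.WithTimeout(context.Background(), d)
    defer cancel()

    if err := lgr.ShutdownWithTimeout(ctx); err != nil {
        if logr.IsTimeoutError(err) {
            fmt.Println("shutdown timed out; some targets may not have drained")
            return
        }
        fmt.Println("shutdown failed:", err)
    }
}

func main() {
    lgr := &logr.Logr{}
    shutdownWithDeadline(lgr, 2*time.Second)
}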
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/v5/LICENSE.txt
index b40b5e58..8382687d 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/LICENSE.txt
+++ b/vendor/github.com/mattermost/mattermost-server/v5/LICENSE.txt
@@ -11,7 +11,7 @@ You may be licensed to use source code to create compiled versions not produced
1. Under the Free Software Foundation’s GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or
2. Under a commercial license available from Mattermost, Inc. by contacting commercial@mattermost.com
-You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/default.json, model/,
+You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/default.json, i18n/, model/,
plugin/ and all subdirectories thereof) under the Apache License v2.0.
We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/NOTICE.txt b/vendor/github.com/mattermost/mattermost-server/v5/NOTICE.txt
index 0316f702..c067c12f 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/NOTICE.txt
+++ b/vendor/github.com/mattermost/mattermost-server/v5/NOTICE.txt
@@ -4132,3 +4132,145 @@ A caching, resizing image proxy written in Go
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
+
+---
+
+## oov/psd
+
+This product contains 'psd' by oov.
+
+A PSD/PSB file reader for go
+
+* HOMEPAGE:
+ * https://github.com/oov/psd
+
+* LICENSE: MIT
+
+MIT License
+
+Copyright (c) 2016 oov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+---
+
+## gopherjs
+
+This product contains 'gopherjs' by Richard Musiol.
+
+A Go code to javascript code compiler.
+
+* HOMEPAGE:
+ * https://github.com/gopherjs/gopherjs
+
+* LICENSE:
+
+Copyright (c) 2013 Richard Musiol. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+---
+
+## semver
+
+This product contains 'semver' by Masterminds.
+
+The semver package provides the ability to work with Semantic Versions in Go.
+
+* HOMEPAGE:
+ * https://github.com/Masterminds/semver
+
+* LICENSE:
+
+Copyright (C) 2014-2019, Matt Butcher and Matt Farina
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+---
+
+## Date Constraints
+
+This product contains 'dateconstraints' by Eli Yukelzon.
+
+Go library to validate a date against constraints
+
+* HOMEPAGE:
+ * https://github.com/reflog/dateconstraints
+
+* LICENSE:
+
+MIT License
+
+Copyright (c) 2020 Eli Yukelzon
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/default.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/default.go
index f356eec7..1e409b19 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/mlog/default.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/default.go
@@ -4,9 +4,13 @@
package mlog
import (
+ "context"
"encoding/json"
+ "errors"
"fmt"
"os"
+
+ "github.com/mattermost/logr"
)
// defaultLog manually encodes the log to STDERR, providing a basic, default logging implementation
@@ -49,3 +53,43 @@ func defaultCriticalLog(msg string, fields ...Field) {
// We map critical to error in zap, so be consistent.
defaultLog("error", msg, fields...)
}
+
+func defaultCustomLog(lvl LogLevel, msg string, fields ...Field) {
+ // custom log levels are only output once log targets are configured.
+}
+
+func defaultCustomMultiLog(lvl []LogLevel, msg string, fields ...Field) {
+ // custom log levels are only output once log targets are configured.
+}
+
+func defaultFlush(ctx context.Context) error {
+ return nil
+}
+
+func defaultAdvancedConfig(cfg LogTargetCfg) error {
+ // mlog.ConfigAdvancedLogging should not be called until the default
+ // logger is replaced with an mlog.Logger instance.
+ return errors.New("cannot config advanced logging on default logger")
+}
+
+func defaultAdvancedShutdown(ctx context.Context) error {
+ return nil
+}
+
+func defaultAddTarget(targets ...logr.Target) error {
+ // mlog.AddTarget should not be called until the default
+ // logger is replaced with an mlog.Logger instance.
+ return errors.New("cannot AddTarget on default logger")
+}
+
+func defaultRemoveTargets(ctx context.Context, f func(TargetInfo) bool) error {
+ // mlog.RemoveTargets should not be called until the default
+ // logger is replaced with an mlog.Logger instance.
+ return errors.New("cannot RemoveTargets on default logger")
+}
+
+func defaultEnableMetrics(collector logr.MetricsCollector) error {
+ // mlog.EnableMetrics should not be called until the default
+ // logger is replaced with an mlog.Logger instance.
+ return errors.New("cannot EnableMetrics on default logger")
+}
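These stubs make the package-level entry points safe to call before a real logger exists: anything advanced simply returns an error rather than panicking. A minimal sketch of what an early caller sees (a hypothetical program using the vendored import path):

    package main

    import (
        "fmt"

        "github.com/mattermost/mattermost-server/v5/mlog"
    )

    func main() {
        // Before InitGlobalLogger runs, ConfigAdvancedLogging is still the
        // defaultAdvancedConfig stub above and simply reports an error.
        if err := mlog.ConfigAdvancedLogging(mlog.LogTargetCfg{}); err != nil {
            fmt.Println(err) // "cannot config advanced logging on default logger"
        }
    }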
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/errors.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/errors.go
new file mode 100644
index 00000000..f8d58968
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/errors.go
@@ -0,0 +1,30 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package mlog
+
+import "github.com/mattermost/logr"
+
+// onLoggerError is called when the logging system encounters an error,
+// such as a target not being able to write records. The targets will keep trying;
+// however, the error will be logged with a dedicated level that can be output
+// to a safe/always-available target for monitoring or alerting.
+func onLoggerError(err error) {
+ Log(LvlLogError, "advanced logging error", Err(err))
+}
+
+// onQueueFull is called when the main logger queue is full, indicating the
+// volume and frequency of log record creation is too high for the queue size
+// and/or the target latencies.
+func onQueueFull(rec *logr.LogRec, maxQueueSize int) bool {
+ Log(LvlLogError, "main queue full, dropping record", Any("rec", rec))
+ return true // drop record
+}
+
+// onTargetQueueFull is called when a target's queue is full, indicating the
+// volume and frequency of log record creation is too high for the target's queue size
+// and/or the target latency.
+func onTargetQueueFull(target logr.Target, rec *logr.LogRec, maxQueueSize int) bool {
+ Log(LvlLogError, "target queue full, dropping record", String("target", ""), Any("rec", rec))
+ return true // drop record
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/global.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/global.go
index 73f40b2f..2986d92d 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/mlog/global.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/global.go
@@ -4,6 +4,11 @@
package mlog
import (
+ "context"
+ "log"
+ "sync/atomic"
+
+ "github.com/mattermost/logr"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
@@ -11,6 +16,10 @@ import (
var globalLogger *Logger
func InitGlobalLogger(logger *Logger) {
+ // Clean up previous instance.
+ if globalLogger != nil && globalLogger.logrLogger != nil {
+ globalLogger.logrLogger.Logr().Shutdown()
+ }
glob := *logger
glob.zap = glob.zap.WithOptions(zap.AddCallerSkip(1))
globalLogger = &glob
@@ -19,13 +28,46 @@ func InitGlobalLogger(logger *Logger) {
Warn = globalLogger.Warn
Error = globalLogger.Error
Critical = globalLogger.Critical
+ Log = globalLogger.Log
+ LogM = globalLogger.LogM
+ Flush = globalLogger.Flush
+ ConfigAdvancedLogging = globalLogger.ConfigAdvancedLogging
+ ShutdownAdvancedLogging = globalLogger.ShutdownAdvancedLogging
+ AddTarget = globalLogger.AddTarget
+ RemoveTargets = globalLogger.RemoveTargets
+ EnableMetrics = globalLogger.EnableMetrics
+}
+
+// logWriterFunc provides access to mlog via io.Writer, so the standard logger
+// can be redirected to use mlog and whatever targets are defined.
+type logWriterFunc func([]byte) (int, error)
+
+func (lw logWriterFunc) Write(p []byte) (int, error) {
+ return lw(p)
}
func RedirectStdLog(logger *Logger) {
- zap.RedirectStdLogAt(logger.zap.With(zap.String("source", "stdlog")).WithOptions(zap.AddCallerSkip(-2)), zapcore.ErrorLevel)
+ if atomic.LoadInt32(&disableZap) == 0 {
+ zap.RedirectStdLogAt(logger.zap.With(zap.String("source", "stdlog")).WithOptions(zap.AddCallerSkip(-2)), zapcore.ErrorLevel)
+ return
+ }
+
+ writer := func(p []byte) (int, error) {
+ Log(LvlStdLog, string(p))
+ return len(p), nil
+ }
+ log.SetOutput(logWriterFunc(writer))
}
type LogFunc func(string, ...Field)
+type LogFuncCustom func(LogLevel, string, ...Field)
+type LogFuncCustomMulti func([]LogLevel, string, ...Field)
+type FlushFunc func(context.Context) error
+type ConfigFunc func(cfg LogTargetCfg) error
+type ShutdownFunc func(context.Context) error
+type AddTargetFunc func(...logr.Target) error
+type RemoveTargetsFunc func(context.Context, func(TargetInfo) bool) error
+type EnableMetricsFunc func(logr.MetricsCollector) error
// DON'T USE THIS. Modify the level on the app logger.
func GloballyDisableDebugLogForTest() {
@@ -42,3 +84,12 @@ var Info LogFunc = defaultInfoLog
var Warn LogFunc = defaultWarnLog
var Error LogFunc = defaultErrorLog
var Critical LogFunc = defaultCriticalLog
+var Log LogFuncCustom = defaultCustomLog
+var LogM LogFuncCustomMulti = defaultCustomMultiLog
+var Flush FlushFunc = defaultFlush
+
+var ConfigAdvancedLogging ConfigFunc = defaultAdvancedConfig
+var ShutdownAdvancedLogging ShutdownFunc = defaultAdvancedShutdown
+var AddTarget AddTargetFunc = defaultAddTarget
+var RemoveTargets RemoveTargetsFunc = defaultRemoveTargets
+var EnableMetrics EnableMetricsFunc = defaultEnableMetrics
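InitGlobalLogger rebinds each of these package-level variables to the supplied Logger, and RedirectStdLog routes the standard library logger through Zap or, when Zap is disabled, through Log(LvlStdLog, ...). A rough wiring sketch:

    package main

    import (
        "log"

        "github.com/mattermost/mattermost-server/v5/mlog"
    )

    func main() {
        logger := mlog.NewLogger(&mlog.LoggerConfiguration{
            EnableConsole: true,
            ConsoleLevel:  "debug",
        })
        mlog.InitGlobalLogger(logger)

        // The package-level helpers now delegate to the Logger instance.
        mlog.Info("global logger wired up")

        // Standard-library output is forwarded as well.
        mlog.RedirectStdLog(logger)
        log.Println("picked up from the std logger")
    }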
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/levels.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/levels.go
new file mode 100644
index 00000000..54bd2549
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/levels.go
@@ -0,0 +1,39 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package mlog
+
+// Standard levels
+var (
+ LvlPanic = LogLevel{ID: 0, Name: "panic", Stacktrace: true}
+ LvlFatal = LogLevel{ID: 1, Name: "fatal", Stacktrace: true}
+ LvlError = LogLevel{ID: 2, Name: "error"}
+ LvlWarn = LogLevel{ID: 3, Name: "warn"}
+ LvlInfo = LogLevel{ID: 4, Name: "info"}
+ LvlDebug = LogLevel{ID: 5, Name: "debug"}
+ LvlTrace = LogLevel{ID: 6, Name: "trace"}
+ // used by redirected standard logger
+ LvlStdLog = LogLevel{ID: 10, Name: "stdlog"}
+ // used only by the logger
+ LvlLogError = LogLevel{ID: 11, Name: "logerror", Stacktrace: true}
+)
+
+// Register custom (discrete) levels here.
+// !!!!! IDs must not exceed 32,768 !!!!!
+var (
+ // used by the audit system
+ LvlAuditAPI = LogLevel{ID: 100, Name: "audit-api"}
+ LvlAuditContent = LogLevel{ID: 101, Name: "audit-content"}
+ LvlAuditPerms = LogLevel{ID: 102, Name: "audit-permissions"}
+ LvlAuditCLI = LogLevel{ID: 103, Name: "audit-cli"}
+
+ // used by the TCP log target
+ LvlTcpLogTarget = LogLevel{ID: 120, Name: "TcpLogTarget"}
+
+ // add more here ...
+)
+
+// Combinations for LogM (log multi)
+var (
+ MLvlAuditAll = []LogLevel{LvlAuditAPI, LvlAuditContent, LvlAuditPerms, LvlAuditCLI}
+)
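Log takes one discrete level and LogM fans a record out to several; these only produce output once matching targets are configured. A small usage sketch (field values are placeholders):

    // One discrete custom level.
    mlog.Log(mlog.LvlAuditAPI, "user updated", mlog.String("user_id", "some-user-id"))

    // Fan the same record out to every enabled audit level.
    mlog.LogM(mlog.MLvlAuditAll, "config changed", mlog.String("key", "SqlSettings"))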
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/log.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/log.go
index 1a6c2de9..eaa8c109 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/mlog/log.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/log.go
@@ -4,10 +4,15 @@
package mlog
import (
+ "context"
+ "fmt"
"io"
"log"
"os"
+ "sync/atomic"
+ "time"
+ "github.com/mattermost/logr"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"gopkg.in/natefinch/lumberjack.v2"
@@ -22,6 +27,19 @@ const (
LevelWarn = "warn"
// Errors are messages about things we know are problems
LevelError = "error"
+
+ // DefaultFlushTimeout is the default amount of time mlog.Flush will wait
+ // before timing out.
+ DefaultFlushTimeout = time.Second * 5
+)
+
+var (
+ // disableZap is set when Zap should be disabled and Logr used instead.
+ // This is needed for unit testing as Zap has no shutdown capabilities
+ // and holds file handles until process exit. Currently unit tests create
+ // many server instances, and thus many Zap log files.
+ // This flag will be removed when Zap is permanently replaced.
+ disableZap int32
)
// Type and function aliases from zap to limit the library's scope into MM code
@@ -38,6 +56,8 @@ var NamedErr = zap.NamedError
var Bool = zap.Bool
var Duration = zap.Duration
+type TargetInfo logr.TargetInfo
+
type LoggerConfiguration struct {
EnableConsole bool
ConsoleJson bool
@@ -52,6 +72,7 @@ type Logger struct {
zap *zap.Logger
consoleLevel zap.AtomicLevel
fileLevel zap.AtomicLevel
+ logrLogger *logr.Logger
}
func getZapLevel(level string) zapcore.Level {
@@ -84,6 +105,7 @@ func NewLogger(config *LoggerConfiguration) *Logger {
logger := &Logger{
consoleLevel: zap.NewAtomicLevelAt(getZapLevel(config.ConsoleLevel)),
fileLevel: zap.NewAtomicLevelAt(getZapLevel(config.FileLevel)),
+ logrLogger: newLogr(),
}
if config.EnableConsole {
@@ -93,13 +115,33 @@ func NewLogger(config *LoggerConfiguration) *Logger {
}
if config.EnableFile {
- writer := zapcore.AddSync(&lumberjack.Logger{
- Filename: config.FileLocation,
- MaxSize: 100,
- Compress: true,
- })
- core := zapcore.NewCore(makeEncoder(config.FileJson), writer, logger.fileLevel)
- cores = append(cores, core)
+ if atomic.LoadInt32(&disableZap) != 0 {
+ t := &LogTarget{
+ Type: "file",
+ Format: "json",
+ Levels: mlogLevelToLogrLevels(config.FileLevel),
+ MaxQueueSize: DefaultMaxTargetQueue,
+ Options: []byte(fmt.Sprintf(`{"Filename":"%s", "MaxSizeMB":%d, "Compress":%t}`,
+ config.FileLocation, 100, true)),
+ }
+ if !config.FileJson {
+ t.Format = "plain"
+ }
+ if tgt, err := NewLogrTarget("mlogFile", t); err == nil {
+ logger.logrLogger.Logr().AddTarget(tgt)
+ } else {
+ Error("error creating mlogFile", Err(err))
+ }
+ } else {
+ writer := zapcore.AddSync(&lumberjack.Logger{
+ Filename: config.FileLocation,
+ MaxSize: 100,
+ Compress: true,
+ })
+
+ core := zapcore.NewCore(makeEncoder(config.FileJson), writer, logger.fileLevel)
+ cores = append(cores, core)
+ }
}
combinedCore := zapcore.NewTee(cores...)
@@ -107,7 +149,6 @@ func NewLogger(config *LoggerConfiguration) *Logger {
logger.zap = zap.New(combinedCore,
zap.AddCaller(),
)
-
return logger
}
@@ -123,6 +164,10 @@ func (l *Logger) SetConsoleLevel(level string) {
func (l *Logger) With(fields ...Field) *Logger {
newlogger := *l
newlogger.zap = newlogger.zap.With(fields...)
+ if newlogger.logrLogger != nil {
+ ll := newlogger.logrLogger.WithFields(zapToLogr(fields))
+ newlogger.logrLogger = &ll
+ }
return &newlogger
}
@@ -161,20 +206,120 @@ func (l *Logger) Sugar() *SugarLogger {
func (l *Logger) Debug(message string, fields ...Field) {
l.zap.Debug(message, fields...)
+ if isLevelEnabled(l.logrLogger, logr.Debug) {
+ l.logrLogger.WithFields(zapToLogr(fields)).Debug(message)
+ }
}
func (l *Logger) Info(message string, fields ...Field) {
l.zap.Info(message, fields...)
+ if isLevelEnabled(l.logrLogger, logr.Info) {
+ l.logrLogger.WithFields(zapToLogr(fields)).Info(message)
+ }
}
func (l *Logger) Warn(message string, fields ...Field) {
l.zap.Warn(message, fields...)
+ if isLevelEnabled(l.logrLogger, logr.Warn) {
+ l.logrLogger.WithFields(zapToLogr(fields)).Warn(message)
+ }
}
func (l *Logger) Error(message string, fields ...Field) {
l.zap.Error(message, fields...)
+ if isLevelEnabled(l.logrLogger, logr.Error) {
+ l.logrLogger.WithFields(zapToLogr(fields)).Error(message)
+ }
}
func (l *Logger) Critical(message string, fields ...Field) {
l.zap.Error(message, fields...)
+ if isLevelEnabled(l.logrLogger, logr.Error) {
+ l.logrLogger.WithFields(zapToLogr(fields)).Error(message)
+ }
+}
+
+func (l *Logger) Log(level LogLevel, message string, fields ...Field) {
+ l.logrLogger.WithFields(zapToLogr(fields)).Log(logr.Level(level), message)
+}
+
+func (l *Logger) LogM(levels []LogLevel, message string, fields ...Field) {
+ var logger *logr.Logger
+ for _, lvl := range levels {
+ if isLevelEnabled(l.logrLogger, logr.Level(lvl)) {
+ // don't create logger with fields unless at least one level is active.
+ if logger == nil {
+ l := l.logrLogger.WithFields(zapToLogr(fields))
+ logger = &l
+ }
+ logger.Log(logr.Level(lvl), message)
+ }
+ }
+}
+
+func (l *Logger) Flush(cxt context.Context) error {
+ return l.logrLogger.Logr().FlushWithTimeout(cxt)
+}
+
+// ShutdownAdvancedLogging stops the logger from accepting new log records and tries to
+// flush queues within the context timeout. Once complete, all targets are shut down
+// and any resources released.
+func (l *Logger) ShutdownAdvancedLogging(cxt context.Context) error {
+ err := l.logrLogger.Logr().ShutdownWithTimeout(cxt)
+ l.logrLogger = newLogr()
+ return err
+}
+
+// ConfigAdvancedLogging (re)configures advanced logging based on the
+// specified log targets. This is the easiest way to get the advanced logger
+// configured via a config source such as a file.
+func (l *Logger) ConfigAdvancedLogging(targets LogTargetCfg) error {
+ if err := l.ShutdownAdvancedLogging(context.Background()); err != nil {
+ Error("error shutting down previous logger", Err(err))
+ }
+
+ err := logrAddTargets(l.logrLogger, targets)
+ return err
+}
+
+// AddTarget adds one or more logr.Target to the advanced logger. This is the preferred method
+// to add custom targets or provide configuration that cannot be expressed via a
+// config source.
+func (l *Logger) AddTarget(targets ...logr.Target) error {
+ return l.logrLogger.Logr().AddTarget(targets...)
+}
+
+// RemoveTargets selectively removes targets that were previously added to this logger instance
+// using the passed in filter function. The filter function should return true to remove the target
+// and false to keep it.
+func (l *Logger) RemoveTargets(ctx context.Context, f func(ti TargetInfo) bool) error {
+ // Use locally defined TargetInfo type so we don't spread Logr dependencies.
+ fc := func(tic logr.TargetInfo) bool {
+ return f(TargetInfo(tic))
+ }
+ return l.logrLogger.Logr().RemoveTargets(ctx, fc)
+}
+
+// EnableMetrics enables metrics collection by supplying a MetricsCollector.
+// The MetricsCollector provides counters and gauges that are updated by log targets.
+func (l *Logger) EnableMetrics(collector logr.MetricsCollector) error {
+ return l.logrLogger.Logr().SetMetricsCollector(collector)
+}
+
+// DisableZap is called to disable Zap, and Logr will be used instead. Any Logger
+// instances created after this call will only use Logr.
+//
+// This is needed for unit testing as Zap has no shutdown capabilities
+// and holds file handles until process exit. Currently unit tests create
+// many server instances, and thus many Zap log file handles.
+//
+// This method will be removed when Zap is permanently replaced.
+func DisableZap() {
+ atomic.StoreInt32(&disableZap, 1)
+}
+
+// EnableZap re-enables Zap such that any Logger instances created after this
+// call will allow Zap targets.
+func EnableZap() {
+ atomic.StoreInt32(&disableZap, 0)
}
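Putting DisableZap, the logr-backed file target and ShutdownAdvancedLogging together, a test setup might look roughly like the sketch below; the file path is a placeholder and the helper name is made up:

    import (
        "context"

        "github.com/mattermost/mattermost-server/v5/mlog"
    )

    func setupTestLogging() {
        mlog.DisableZap()

        logger := mlog.NewLogger(&mlog.LoggerConfiguration{
            EnableFile:   true,
            FileJson:     true,
            FileLevel:    "info",
            FileLocation: "/tmp/mlog-test.log", // placeholder path
        })
        mlog.InitGlobalLogger(logger)

        // ... exercise the code under test ...

        ctx, cancel := context.WithTimeout(context.Background(), mlog.DefaultFlushTimeout)
        defer cancel()
        _ = mlog.ShutdownAdvancedLogging(ctx) // flushes queues and closes the logr file target
    }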
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/logr.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/logr.go
new file mode 100644
index 00000000..01b39024
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/logr.go
@@ -0,0 +1,247 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package mlog
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/mattermost/logr"
+ logrFmt "github.com/mattermost/logr/format"
+ "github.com/mattermost/logr/target"
+ "go.uber.org/zap/zapcore"
+)
+
+const (
+ DefaultMaxTargetQueue = 1000
+ DefaultSysLogPort = 514
+)
+
+type LogLevel struct {
+ ID logr.LevelID
+ Name string
+ Stacktrace bool
+}
+
+type LogTarget struct {
+ Type string // one of "console", "file", "tcp", "syslog", "none".
+ Format string // one of "json", "plain"
+ Levels []LogLevel
+ Options json.RawMessage
+ MaxQueueSize int
+}
+
+type LogTargetCfg map[string]*LogTarget
+type LogrCleanup func() error
+
+func newLogr() *logr.Logger {
+ lgr := &logr.Logr{}
+ lgr.OnExit = func(int) {}
+ lgr.OnPanic = func(interface{}) {}
+ lgr.OnLoggerError = onLoggerError
+ lgr.OnQueueFull = onQueueFull
+ lgr.OnTargetQueueFull = onTargetQueueFull
+
+ logger := lgr.NewLogger()
+ return &logger
+}
+
+func logrAddTargets(logger *logr.Logger, targets LogTargetCfg) error {
+ lgr := logger.Logr()
+ var errs error
+ for name, t := range targets {
+ target, err := NewLogrTarget(name, t)
+ if err != nil {
+ errs = multierror.Append(err)
+ continue
+ }
+ if target != nil {
+ target.SetName(name)
+ lgr.AddTarget(target)
+ }
+ }
+ return errs
+}
+
+// NewLogrTarget creates a `logr.Target` based on a target config.
+// Can be used when parsing custom config files, or when programmatically adding
+// built-in targets. Use `mlog.AddTarget` to add custom targets.
+func NewLogrTarget(name string, t *LogTarget) (logr.Target, error) {
+ formatter, err := newFormatter(name, t.Format)
+ if err != nil {
+ return nil, err
+ }
+ filter, err := newFilter(name, t.Levels)
+ if err != nil {
+ return nil, err
+ }
+
+ if t.MaxQueueSize == 0 {
+ t.MaxQueueSize = DefaultMaxTargetQueue
+ }
+
+ switch t.Type {
+ case "console":
+ return newConsoleTarget(name, t, filter, formatter)
+ case "file":
+ return newFileTarget(name, t, filter, formatter)
+ case "syslog":
+ return newSyslogTarget(name, t, filter, formatter)
+ case "tcp":
+ return newTCPTarget(name, t, filter, formatter)
+ case "none":
+ return nil, nil
+ }
+ return nil, fmt.Errorf("invalid type '%s' for target %s", t.Type, name)
+}
+
+func newFilter(name string, levels []LogLevel) (logr.Filter, error) {
+ filter := &logr.CustomFilter{}
+ for _, lvl := range levels {
+ filter.Add(logr.Level(lvl))
+ }
+ return filter, nil
+}
+
+func newFormatter(name string, format string) (logr.Formatter, error) {
+ switch format {
+ case "json", "":
+ return &logrFmt.JSON{}, nil
+ case "plain":
+ return &logrFmt.Plain{Delim: " | "}, nil
+ default:
+ return nil, fmt.Errorf("invalid format '%s' for target %s", format, name)
+ }
+}
+
+func newConsoleTarget(name string, t *LogTarget, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) {
+ type consoleOptions struct {
+ Out string `json:"Out"`
+ }
+ options := &consoleOptions{}
+ if err := json.Unmarshal(t.Options, options); err != nil {
+ return nil, err
+ }
+
+ var w io.Writer
+ switch options.Out {
+ case "stdout", "":
+ w = os.Stdout
+ case "stderr":
+ w = os.Stderr
+ default:
+ return nil, fmt.Errorf("invalid out '%s' for target %s", options.Out, name)
+ }
+
+ newTarget := target.NewWriterTarget(filter, formatter, w, t.MaxQueueSize)
+ return newTarget, nil
+}
+
+func newFileTarget(name string, t *LogTarget, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) {
+ type fileOptions struct {
+ Filename string `json:"Filename"`
+ MaxSize int `json:"MaxSizeMB"`
+ MaxAge int `json:"MaxAgeDays"`
+ MaxBackups int `json:"MaxBackups"`
+ Compress bool `json:"Compress"`
+ }
+ options := &fileOptions{}
+ if err := json.Unmarshal(t.Options, options); err != nil {
+ return nil, err
+ }
+ return newFileTargetWithOpts(name, t, target.FileOptions(*options), filter, formatter)
+}
+
+func newFileTargetWithOpts(name string, t *LogTarget, opts target.FileOptions, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) {
+ if opts.Filename == "" {
+ return nil, fmt.Errorf("missing 'Filename' option for target %s", name)
+ }
+ if err := checkFileWritable(opts.Filename); err != nil {
+ return nil, fmt.Errorf("error writing to 'Filename' for target %s: %w", name, err)
+ }
+
+ newTarget := target.NewFileTarget(filter, formatter, opts, t.MaxQueueSize)
+ return newTarget, nil
+}
+
+func newSyslogTarget(name string, t *LogTarget, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) {
+ options := &SyslogParams{}
+ if err := json.Unmarshal(t.Options, options); err != nil {
+ return nil, err
+ }
+
+ if options.IP == "" {
+ return nil, fmt.Errorf("missing 'IP' option for target %s", name)
+ }
+ if options.Port == 0 {
+ options.Port = DefaultSysLogPort
+ }
+ return NewSyslogTarget(filter, formatter, options, t.MaxQueueSize)
+}
+
+func newTCPTarget(name string, t *LogTarget, filter logr.Filter, formatter logr.Formatter) (logr.Target, error) {
+ options := &TcpParams{}
+ if err := json.Unmarshal(t.Options, options); err != nil {
+ return nil, err
+ }
+
+ if options.IP == "" {
+ return nil, fmt.Errorf("missing 'IP' option for target %s", name)
+ }
+ if options.Port == 0 {
+ return nil, fmt.Errorf("missing 'Port' option for target %s", name)
+ }
+ return NewTcpTarget(filter, formatter, options, t.MaxQueueSize)
+}
+
+func checkFileWritable(filename string) error {
+ // try opening/creating the file for writing
+ file, err := os.OpenFile(filename, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
+ if err != nil {
+ return err
+ }
+ file.Close()
+ return nil
+}
+
+func isLevelEnabled(logger *logr.Logger, level logr.Level) bool {
+ if logger == nil || logger.Logr() == nil {
+ return false
+ }
+
+ status := logger.Logr().IsLevelEnabled(level)
+ return status.Enabled
+}
+
+// zapToLogr converts Zap fields to Logr fields.
+// This will not be needed once Logr is used for all logging.
+func zapToLogr(zapFields []Field) logr.Fields {
+ encoder := zapcore.NewMapObjectEncoder()
+ for _, zapField := range zapFields {
+ zapField.AddTo(encoder)
+ }
+ return logr.Fields(encoder.Fields)
+}
+
+// mlogLevelToLogrLevels converts an mlog logger level to
+// an array of discrete Logr levels.
+func mlogLevelToLogrLevels(level string) []LogLevel {
+ levels := make([]LogLevel, 0)
+ levels = append(levels, LvlError, LvlPanic, LvlFatal, LvlStdLog)
+
+ switch level {
+ case LevelDebug:
+ levels = append(levels, LvlDebug)
+ fallthrough
+ case LevelInfo:
+ levels = append(levels, LvlInfo)
+ fallthrough
+ case LevelWarn:
+ levels = append(levels, LvlWarn)
+ }
+ return levels
+}
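NewLogrTarget dispatches on Type and Format, so a console target writing plain text to stderr can be declared as data and handed to AddTarget. A hedged sketch; the target name is arbitrary:

    import (
        "encoding/json"

        "github.com/mattermost/mattermost-server/v5/mlog"
    )

    func addConsoleTarget() error {
        t := &mlog.LogTarget{
            Type:    "console",
            Format:  "plain",
            Levels:  []mlog.LogLevel{mlog.LvlError, mlog.LvlWarn, mlog.LvlInfo},
            Options: json.RawMessage(`{"Out": "stderr"}`),
        }
        tgt, err := mlog.NewLogrTarget("stderrConsole", t) // name is arbitrary
        if err != nil {
            return err
        }
        return mlog.AddTarget(tgt)
    }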
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/syslog.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/syslog.go
new file mode 100644
index 00000000..8766a964
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/syslog.go
@@ -0,0 +1,142 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package mlog
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io/ioutil"
+
+ "github.com/mattermost/logr"
+ "github.com/wiggin77/merror"
+ syslog "github.com/wiggin77/srslog"
+)
+
+// Syslog outputs log records to local or remote syslog.
+type Syslog struct {
+ logr.Basic
+ w *syslog.Writer
+}
+
+// SyslogParams provides parameters for dialing a syslog daemon.
+type SyslogParams struct {
+ IP string `json:"IP"`
+ Port int `json:"Port"`
+ Tag string `json:"Tag"`
+ TLS bool `json:"TLS"`
+ Cert string `json:"Cert"`
+ Insecure bool `json:"Insecure"`
+}
+
+// NewSyslogTarget creates a target capable of outputting log records to remote or local syslog, with or without TLS.
+func NewSyslogTarget(filter logr.Filter, formatter logr.Formatter, params *SyslogParams, maxQueue int) (*Syslog, error) {
+ network := "tcp"
+ var config *tls.Config
+
+ if params.TLS {
+ network = "tcp+tls"
+ config = &tls.Config{InsecureSkipVerify: params.Insecure}
+ if params.Cert != "" {
+ pool, err := getCertPool(params.Cert)
+ if err != nil {
+ return nil, err
+ }
+ config.RootCAs = pool
+ }
+ }
+ raddr := fmt.Sprintf("%s:%d", params.IP, params.Port)
+
+ writer, err := syslog.DialWithTLSConfig(network, raddr, syslog.LOG_INFO, params.Tag, config)
+ if err != nil {
+ return nil, err
+ }
+
+ s := &Syslog{w: writer}
+ s.Basic.Start(s, s, filter, formatter, maxQueue)
+
+ return s, nil
+}
+
+// Shutdown stops processing log records after making best effort to flush queue.
+func (s *Syslog) Shutdown(ctx context.Context) error {
+ errs := merror.New()
+
+ err := s.Basic.Shutdown(ctx)
+ errs.Append(err)
+
+ err = s.w.Close()
+ errs.Append(err)
+
+ return errs.ErrorOrNil()
+}
+
+// getCertPool returns an x509.CertPool containing the cert(s)
+// from `cert`, which can be a path to a .pem or .crt file,
+// or a base64-encoded cert.
+func getCertPool(cert string) (*x509.CertPool, error) {
+ if cert == "" {
+ return nil, errors.New("no cert provided")
+ }
+
+ // first treat as a file and try to read.
+ serverCert, err := ioutil.ReadFile(cert)
+ if err != nil {
+ // maybe it's a base64 encoded cert
+ serverCert, err = base64.StdEncoding.DecodeString(cert)
+ if err != nil {
+ return nil, errors.New("cert cannot be read")
+ }
+ }
+
+ pool := x509.NewCertPool()
+ if ok := pool.AppendCertsFromPEM(serverCert); ok {
+ return pool, nil
+ }
+ return nil, errors.New("cannot parse cert")
+}
+
+// Write converts the log record to bytes, via the Formatter,
+// and outputs to syslog.
+func (s *Syslog) Write(rec *logr.LogRec) error {
+ _, stacktrace := s.IsLevelEnabled(rec.Level())
+
+ buf := rec.Logger().Logr().BorrowBuffer()
+ defer rec.Logger().Logr().ReleaseBuffer(buf)
+
+ buf, err := s.Formatter().Format(rec, stacktrace, buf)
+ if err != nil {
+ return err
+ }
+ txt := buf.String()
+
+ switch rec.Level() {
+ case logr.Panic, logr.Fatal:
+ err = s.w.Crit(txt)
+ case logr.Error:
+ err = s.w.Err(txt)
+ case logr.Warn:
+ err = s.w.Warning(txt)
+ case logr.Debug, logr.Trace:
+ err = s.w.Debug(txt)
+ default:
+ // logr.Info plus all custom levels.
+ err = s.w.Info(txt)
+ }
+
+ if err != nil {
+ reporter := rec.Logger().Logr().ReportError
+ reporter(fmt.Errorf("syslog write fail: %w", err))
+ // syslog writer will try to reconnect.
+ }
+ return err
+}
+
+// String returns a string representation of this target.
+func (s *Syslog) String() string {
+ return "SyslogTarget"
+}
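The SyslogParams fields map directly onto the Options JSON of a "syslog" target, with Port falling back to DefaultSysLogPort when omitted. A sketch of wiring it up through ConfigAdvancedLogging, assuming the global logger is already initialized; the IP and tag are placeholders:

    import (
        "encoding/json"

        "github.com/mattermost/mattermost-server/v5/mlog"
    )

    func configureSyslog() error {
        cfg := mlog.LogTargetCfg{
            "auditSyslog": {
                Type:   "syslog",
                Format: "json",
                Levels: mlog.MLvlAuditAll,
                // Port is omitted, so DefaultSysLogPort (514) is used.
                Options: json.RawMessage(`{"IP": "192.0.2.10", "Tag": "mattermost"}`),
            },
        }
        return mlog.ConfigAdvancedLogging(cfg)
    }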
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/tcp.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/tcp.go
new file mode 100644
index 00000000..dad20474
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/tcp.go
@@ -0,0 +1,274 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package mlog
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/mattermost/logr"
+
+ _ "net/http/pprof"
+)
+
+const (
+ DialTimeoutSecs = 30
+ WriteTimeoutSecs = 30
+ RetryBackoffMillis int64 = 100
+ MaxRetryBackoffMillis int64 = 30 * 1000 // 30 seconds
+)
+
+// Tcp outputs log records to a raw socket server.
+type Tcp struct {
+ logr.Basic
+
+ params *TcpParams
+ addy string
+
+ mutex sync.Mutex
+ conn net.Conn
+ monitor chan struct{}
+ shutdown chan struct{}
+}
+
+// TcpParams provides parameters for dialing a socket server.
+type TcpParams struct {
+ IP string `json:"IP"`
+ Port int `json:"Port"`
+ TLS bool `json:"TLS"`
+ Cert string `json:"Cert"`
+ Insecure bool `json:"Insecure"`
+}
+
+// NewTcpTarget creates a target capable of outputting log records to a raw socket, with or without TLS.
+func NewTcpTarget(filter logr.Filter, formatter logr.Formatter, params *TcpParams, maxQueue int) (*Tcp, error) {
+ tcp := &Tcp{
+ params: params,
+ addy: fmt.Sprintf("%s:%d", params.IP, params.Port),
+ monitor: make(chan struct{}),
+ shutdown: make(chan struct{}),
+ }
+ tcp.Basic.Start(tcp, tcp, filter, formatter, maxQueue)
+
+ return tcp, nil
+}
+
+// getConn provides a net.Conn. If a connection already exists, it is returned immediately,
+// otherwise this method blocks until a new connection is created, a timeout occurs, or shutdown begins.
+func (tcp *Tcp) getConn() (net.Conn, error) {
+ tcp.mutex.Lock()
+ defer tcp.mutex.Unlock()
+
+ Log(LvlTcpLogTarget, "getConn enter", String("addy", tcp.addy))
+ defer Log(LvlTcpLogTarget, "getConn exit", String("addy", tcp.addy))
+
+ if tcp.conn != nil {
+ Log(LvlTcpLogTarget, "reusing existing conn", String("addy", tcp.addy)) // use "With" once Zap is removed
+ return tcp.conn, nil
+ }
+
+ type result struct {
+ conn net.Conn
+ err error
+ }
+
+ connChan := make(chan result)
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*DialTimeoutSecs)
+ defer cancel()
+
+ go func(ctx context.Context, ch chan result) {
+ Log(LvlTcpLogTarget, "dialing", String("addy", tcp.addy))
+ conn, err := tcp.dial(ctx)
+ if err == nil {
+ tcp.conn = conn
+ tcp.monitor = make(chan struct{})
+ go monitor(tcp.conn, tcp.monitor)
+ }
+ connChan <- result{conn: conn, err: err}
+ }(ctx, connChan)
+
+ select {
+ case <-tcp.shutdown:
+ return nil, errors.New("shutdown")
+ case res := <-connChan:
+ return res.conn, res.err
+ }
+}
+
+// dial connects to a TCP socket, and optionally performs a TLS handshake.
+// A non-nil context must be provided which can cancel the dial.
+func (tcp *Tcp) dial(ctx context.Context) (net.Conn, error) {
+ var dialer net.Dialer
+ dialer.Timeout = time.Second * DialTimeoutSecs
+ conn, err := dialer.DialContext(ctx, "tcp", fmt.Sprintf("%s:%d", tcp.params.IP, tcp.params.Port))
+ if err != nil {
+ return nil, err
+ }
+
+ if !tcp.params.TLS {
+ return conn, nil
+ }
+
+ Log(LvlTcpLogTarget, "TLS handshake", String("addy", tcp.addy))
+
+ tlsconfig := &tls.Config{
+ ServerName: tcp.params.IP,
+ InsecureSkipVerify: tcp.params.Insecure,
+ }
+ if tcp.params.Cert != "" {
+ pool, err := getCertPool(tcp.params.Cert)
+ if err != nil {
+ return nil, err
+ }
+ tlsconfig.RootCAs = pool
+ }
+
+ tlsConn := tls.Client(conn, tlsconfig)
+ if err := tlsConn.Handshake(); err != nil {
+ return nil, err
+ }
+ return tlsConn, nil
+}
+
+func (tcp *Tcp) close() error {
+ tcp.mutex.Lock()
+ defer tcp.mutex.Unlock()
+
+ var err error
+ if tcp.conn != nil {
+ Log(LvlTcpLogTarget, "closing connection", String("addy", tcp.addy))
+ close(tcp.monitor)
+ err = tcp.conn.Close()
+ tcp.conn = nil
+ }
+ return err
+}
+
+// Shutdown stops processing log records after making best effort to flush queue.
+func (tcp *Tcp) Shutdown(ctx context.Context) error {
+ errs := &multierror.Error{}
+
+ Log(LvlTcpLogTarget, "shutting down", String("addy", tcp.addy))
+
+ if err := tcp.Basic.Shutdown(ctx); err != nil {
+ errs = multierror.Append(errs, err)
+ }
+
+ if err := tcp.close(); err != nil {
+ errs = multierror.Append(errs, err)
+ }
+
+ close(tcp.shutdown)
+ return errs.ErrorOrNil()
+}
+
+// Write converts the log record to bytes, via the Formatter, and outputs to the socket.
+// Called by a dedicated target goroutine and will block until success or shutdown.
+func (tcp *Tcp) Write(rec *logr.LogRec) error {
+ _, stacktrace := tcp.IsLevelEnabled(rec.Level())
+
+ buf := rec.Logger().Logr().BorrowBuffer()
+ defer rec.Logger().Logr().ReleaseBuffer(buf)
+
+ buf, err := tcp.Formatter().Format(rec, stacktrace, buf)
+ if err != nil {
+ return err
+ }
+
+ try := 1
+ backoff := RetryBackoffMillis
+ for {
+ select {
+ case <-tcp.shutdown:
+ return err
+ default:
+ }
+
+ conn, err := tcp.getConn()
+ if err != nil {
+ Log(LvlTcpLogTarget, "failed getting connection", String("addy", tcp.addy), Err(err))
+ reporter := rec.Logger().Logr().ReportError
+ reporter(fmt.Errorf("log target %s connection error: %w", tcp.String(), err))
+ backoff = tcp.sleep(backoff)
+ continue
+ }
+
+ conn.SetWriteDeadline(time.Now().Add(time.Second * WriteTimeoutSecs))
+ _, err = buf.WriteTo(conn)
+ if err == nil {
+ return nil
+ }
+
+ Log(LvlTcpLogTarget, "write error", String("addy", tcp.addy), Err(err))
+ reporter := rec.Logger().Logr().ReportError
+ reporter(fmt.Errorf("log target %s write error: %w", tcp.String(), err))
+
+ _ = tcp.close()
+
+ backoff = tcp.sleep(backoff)
+ try++
+ Log(LvlTcpLogTarget, "retrying write", String("addy", tcp.addy), Int("try", try))
+ }
+}
+
+// monitor continuously tries to read from the connection to detect socket close.
+// This is needed because the TCP target uses a write-only socket and Linux systems
+// take a long time to detect a loss of connectivity on a socket when only writing;
+// the writes simply fail without an error returned.
+func monitor(conn net.Conn, done <-chan struct{}) {
+ addy := conn.RemoteAddr().String()
+ defer Log(LvlTcpLogTarget, "monitor exiting", String("addy", addy))
+
+ buf := make([]byte, 1)
+ for {
+ Log(LvlTcpLogTarget, "monitor loop", String("addy", addy))
+
+ select {
+ case <-done:
+ return
+ case <-time.After(1 * time.Second):
+ }
+
+ err := conn.SetReadDeadline(time.Now().Add(time.Second * 30))
+ if err != nil {
+ continue
+ }
+
+ _, err = conn.Read(buf)
+
+ if errt, ok := err.(net.Error); ok && errt.Timeout() {
+ // read timeout is expected, keep looping.
+ continue
+ }
+
+ // Any other error closes the connection, forcing a reconnect.
+ Log(LvlTcpLogTarget, "monitor closing connection", Err(err))
+ conn.Close()
+ return
+ }
+}
+
+// String returns a string representation of this target.
+func (tcp *Tcp) String() string {
+ return fmt.Sprintf("TcpTarget[%s:%d]", tcp.params.IP, tcp.params.Port)
+}
+
+func (tcp *Tcp) sleep(backoff int64) int64 {
+ select {
+ case <-tcp.shutdown:
+ case <-time.After(time.Millisecond * time.Duration(backoff)):
+ }
+
+ nextBackoff := backoff + (backoff >> 1)
+ if nextBackoff > MaxRetryBackoffMillis {
+ nextBackoff = MaxRetryBackoffMillis
+ }
+ return nextBackoff
+}
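TcpParams follows the same pattern, so a TLS raw-socket target is just another Options blob; the address and cert path below are placeholders, and the imports match the console sketch above:

    t := &mlog.LogTarget{
        Type:    "tcp",
        Format:  "json",
        Levels:  []mlog.LogLevel{mlog.LvlError, mlog.LvlWarn},
        Options: json.RawMessage(`{"IP": "192.0.2.20", "Port": 6514, "TLS": true, "Cert": "/etc/mattermost/logsrv-ca.pem"}`),
    }
    if tgt, err := mlog.NewLogrTarget("rawSocket", t); err == nil {
        _ = mlog.AddTarget(tgt)
    }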
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/test-tls-client-cert.pem b/vendor/github.com/mattermost/mattermost-server/v5/mlog/test-tls-client-cert.pem
new file mode 100644
index 00000000..6ce8d042
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/test-tls-client-cert.pem
@@ -0,0 +1,43 @@
+-----BEGIN CERTIFICATE-----
+MIIDjzCCAnegAwIBAgIRAPYfRSwdzKopBKxYxKqslJUwDQYJKoZIhvcNAQELBQAw
+JzElMCMGA1UEAwwcTWF0dGVybW9zdCwgSW5jLiBJbnRlcm5hbCBDQTAeFw0xOTAz
+MjIwMDE0MTVaFw0yMjAzMDYwMDE0MTVaMDsxOTA3BgNVBAMTME1hdHRlcm1vc3Qs
+IEluYy4gSW50ZXJuYWwgSW50ZXJtZWRpYXRlIEF1dGhvcml0eTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAMjliRdmvnNL4u/Jr/M2dPwQmTJXEBY/Vq9Q
+vAU52X3tRMCPxcaFz+x6ftuvdO2NdohXGAmtx9QU5LZcvFeTDpoVEBo9A+4jtLvD
+DZYaTNLpJmoSoJHaDbdWX+OAOqyDiWS741LuiMKWHhew9QOisat2ZINPxjmAd9wE
+xthTMgzsv7MUqnMer8U5OGQ0Qy7wAmNRc+2K3qPwkxe2RUvcte50DUFNgxEginsh
+vrkOXR383vUCZfu72qu8oggjiQpyTllu5je2Ap6JLjYLkEMiMqrYADuWor/ZHwa6
+WrFqVETxWfAV5u9Eh0wZM/KKYwRQuw9y+Nans77FmUl1tVWWNN8CAwEAAaOBoTCB
+njAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBQY4Uqswyr2hO/HetZt2RDxJdTIPjBi
+BgNVHSMEWzBZgBRFZXVg2Z5tNIsWeWjBLEy2yzKbMKErpCkwJzElMCMGA1UEAwwc
+TWF0dGVybW9zdCwgSW5jLiBJbnRlcm5hbCBDQYIUEifGUOM+bIFZo1tkjZB5YGBr
+0xEwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4IBAQAEdexL30Q0zBHmPAH8
+LhdK7dbzW1CmILbxRZlKAwRN+hKRXiMW3MHIkhNuoV9Aev602Q+ja4lWsRi/ktOL
+ni1FWx5gSScgdG8JGj47dOmoT3vXKX7+umiv4rQLPDl9/DKMuv204OYJq6VT+uNU
+6C6kL157jGJEO76H4fMZ8oYsD7Sq0zjiNKtuCYii0ngH3j3gB1jACLqRgveU7MdT
+pqOV2KfY31+h8VBtkUvljNztQ9xNY8Fjmt0SMf7E3FaUcaar3ZCr70G5aU3dKbe7
+47vGOBa5tCqw4YK0jgDKid3IJQul9a3J1mSsH8Wy3to9cAV4KGZBQLnzCX15a/+v
+3yVh
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDfjCCAmagAwIBAgIUEifGUOM+bIFZo1tkjZB5YGBr0xEwDQYJKoZIhvcNAQEL
+BQAwJzElMCMGA1UEAwwcTWF0dGVybW9zdCwgSW5jLiBJbnRlcm5hbCBDQTAeFw0x
+OTAzMjEyMTI4NDNaFw0yOTAzMTgyMTI4NDNaMCcxJTAjBgNVBAMMHE1hdHRlcm1v
+c3QsIEluYy4gSW50ZXJuYWwgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQDH0Xq5rMBGpKOVWTpb5MnaJIWFP/vOtvEk+7hVrfOfe1/5x0Kk3UgAHj85
+otaEZD1Lhn/JLkEqCiE/UXMJFwJDlNcO4CkdKBSpYX4bKAqy5q/X3QwioMSNpJG1
++YYrNGBH0sgKcKjyCaLhmqYLD0xZDVOmWIYBU9jUPyXw5U0tnsVrTqGMxVkm1xCY
+krCWN1ZoUrLvL0MCZc5qpxoPTopr9UO9cqSBSuy6BVWVuEWBZhpqHt+ul8VxhzzY
+q1k4l7r2qw+/wm1iJBedTeBVeWNag8JaVfLgu+/W7oJVlPO32Po7pnvHp8iJ3b4K
+zXyVHaTX4S6Em+6LV8855TYrShzlAgMBAAGjgaEwgZ4wHQYDVR0OBBYEFEVldWDZ
+nm00ixZ5aMEsTLbLMpswMGIGA1UdIwRbMFmAFEVldWDZnm00ixZ5aMEsTLbLMpsw
+oSukKTAnMSUwIwYDVQQDDBxNYXR0ZXJtb3N0LCBJbmMuIEludGVybmFsIENBghQS
+J8ZQ4z5sgVmjW2SNkHlgYGvTETAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjAN
+BgkqhkiG9w0BAQsFAAOCAQEAPiCWFmopyAkY2T3Zyo4yaRPhX1+VOTMKJtY6EUhq
+/GHz6kzEyvCUBf0N892cibGxekrEoItY9NqO6RQRfowg+Gn5kc13z4NyL2W8/eoT
+Xy0ZvfaQbU++fQ6pVtWtMblDMU9xiYd7/MDvJpO328l1Vhcdp8kEi+lCvpy0sCRc
+PxzPhbgCMAbZEGx+4TMQd4SZKzlRxW/2fflpReh6v1Dv0VDUSYQWwsUnaLpdKHfh
+a5k0vuySYcszE4YKlY0zakeFlJfp7fBp1xTwcdW8aTfw15EicPMwTc6xxA4JJUJx
+cddu817n1nayK5u6r9Qh1oIVkr0nC9YELMMy4dpPgJ88SA==
+-----END CERTIFICATE-----
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/mlog/testing.go b/vendor/github.com/mattermost/mattermost-server/v5/mlog/testing.go
index bf1bcedf..1f2f437f 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/mlog/testing.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/mlog/testing.go
@@ -32,9 +32,10 @@ func NewTestingLogger(tb testing.TB, writer io.Writer) *Logger {
testingLogger := &Logger{
consoleLevel: zap.NewAtomicLevelAt(getZapLevel("debug")),
fileLevel: zap.NewAtomicLevelAt(getZapLevel("info")),
+ logrLogger: newLogr(),
}
- logWriterCore := zapcore.NewCore(makeEncoder(true), logWriterSync, testingLogger.consoleLevel)
+ logWriterCore := zapcore.NewCore(makeEncoder(true), zapcore.Lock(logWriterSync), testingLogger.consoleLevel)
testingLogger.zap = zap.New(logWriterCore,
zap.AddCaller(),
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/bot.go b/vendor/github.com/mattermost/mattermost-server/v5/model/bot.go
index 15ef6a70..fb46be49 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/bot.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/bot.go
@@ -13,9 +13,10 @@ import (
)
const (
- BOT_DISPLAY_NAME_MAX_RUNES = USER_FIRST_NAME_MAX_RUNES
- BOT_DESCRIPTION_MAX_RUNES = 1024
- BOT_CREATOR_ID_MAX_RUNES = KEY_VALUE_PLUGIN_ID_MAX_RUNES // UserId or PluginId
+ BOT_DISPLAY_NAME_MAX_RUNES = USER_FIRST_NAME_MAX_RUNES
+ BOT_DESCRIPTION_MAX_RUNES = 1024
+ BOT_CREATOR_ID_MAX_RUNES = KEY_VALUE_PLUGIN_ID_MAX_RUNES // UserId or PluginId
+ BOT_WARN_METRIC_BOT_USERNAME = "mattermost-advisor"
)
// Bot is a special type of User meant for programmatic interactions.
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel.go
index 6a84b355..282271ad 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/channel.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel.go
@@ -120,12 +120,18 @@ type ChannelModeratedRolesPatch struct {
// PerPage number of results per page, if paginated.
//
type ChannelSearchOpts struct {
- NotAssociatedToGroup string
- ExcludeDefaultChannels bool
- IncludeDeleted bool
- ExcludeChannelNames []string
- Page *int
- PerPage *int
+ NotAssociatedToGroup string
+ ExcludeDefaultChannels bool
+ IncludeDeleted bool
+ Deleted bool
+ ExcludeChannelNames []string
+ TeamIds []string
+ GroupConstrained bool
+ ExcludeGroupConstrained bool
+ Public bool
+ Private bool
+ Page *int
+ PerPage *int
}
type ChannelMemberCountByGroup struct {
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_member_history_result.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_member_history_result.go
index 6197d410..8f43ca4e 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_member_history_result.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_member_history_result.go
@@ -10,7 +10,8 @@ type ChannelMemberHistoryResult struct {
LeaveTime *int64
// these two fields are never set in the database - when we SELECT, we join on Users to get them
- UserEmail string `db:"Email"`
- Username string
- IsBot bool
+ UserEmail string `db:"Email"`
+ Username string
+ IsBot bool
+ UserDeleteAt int64
}
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_search.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_search.go
index 2e994227..87fd3aef 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_search.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_search.go
@@ -11,11 +11,18 @@ import (
const CHANNEL_SEARCH_DEFAULT_LIMIT = 50
type ChannelSearch struct {
- Term string `json:"term"`
- ExcludeDefaultChannels bool `json:"exclude_default_channels"`
- NotAssociatedToGroup string `json:"not_associated_to_group"`
- Page *int `json:"page,omitempty"`
- PerPage *int `json:"per_page,omitempty"`
+ Term string `json:"term"`
+ ExcludeDefaultChannels bool `json:"exclude_default_channels"`
+ NotAssociatedToGroup string `json:"not_associated_to_group"`
+ TeamIds []string `json:"team_ids"`
+ GroupConstrained bool `json:"group_constrained"`
+ ExcludeGroupConstrained bool `json:"exclude_group_constrained"`
+ Public bool `json:"public"`
+ Private bool `json:"private"`
+ IncludeDeleted bool `json:"include_deleted"`
+ Deleted bool `json:"deleted"`
+ Page *int `json:"page,omitempty"`
+ PerPage *int `json:"per_page,omitempty"`
}
// ToJson convert a Channel to a json string
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/channel_sidebar.go b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_sidebar.go
new file mode 100644
index 00000000..6a79593c
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/channel_sidebar.go
@@ -0,0 +1,111 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type SidebarCategoryType string
+type SidebarCategorySorting string
+
+const (
+ // Each sidebar category has a 'type'. System categories are Channels, Favorites and DMs
+ // All user-created categories will have type Custom
+ SidebarCategoryChannels SidebarCategoryType = "channels"
+ SidebarCategoryDirectMessages SidebarCategoryType = "direct_messages"
+ SidebarCategoryFavorites SidebarCategoryType = "favorites"
+ SidebarCategoryCustom SidebarCategoryType = "custom"
+ // Increment to use when adding/reordering things in the sidebar
+ MinimalSidebarSortDistance = 10
+ // Default Sort Orders for categories
+ DefaultSidebarSortOrderFavorites = 0
+ DefaultSidebarSortOrderChannels = DefaultSidebarSortOrderFavorites + MinimalSidebarSortDistance
+ DefaultSidebarSortOrderDMs = DefaultSidebarSortOrderChannels + MinimalSidebarSortDistance
+ // Sorting modes
+ // default for all categories except DMs (behaves like manual)
+ SidebarCategorySortDefault SidebarCategorySorting = ""
+ // sort manually
+ SidebarCategorySortManual SidebarCategorySorting = "manual"
+ // sort by recency (default for DMs)
+ SidebarCategorySortRecent SidebarCategorySorting = "recent"
+ // sort by display name alphabetically
+ SidebarCategorySortAlphabetical SidebarCategorySorting = "alpha"
+)
+
+// SidebarCategory represents the corresponding DB table
+// SortOrder is never returned to the user and only used for queries
+type SidebarCategory struct {
+ Id string `json:"id"`
+ UserId string `json:"user_id"`
+ TeamId string `json:"team_id"`
+ SortOrder int64 `json:"-"`
+ Sorting SidebarCategorySorting `json:"sorting"`
+ Type SidebarCategoryType `json:"type"`
+ DisplayName string `json:"display_name"`
+}
+
+// SidebarCategoryWithChannels combines data from SidebarCategory table with the Channel IDs that belong to that category
+type SidebarCategoryWithChannels struct {
+ SidebarCategory
+ Channels []string `json:"channel_ids"`
+}
+
+type SidebarCategoryOrder []string
+
+// OrderedSidebarCategories combines categories, their channel IDs and an array of Category IDs, sorted
+type OrderedSidebarCategories struct {
+ Categories SidebarCategoriesWithChannels `json:"categories"`
+ Order SidebarCategoryOrder `json:"order"`
+}
+
+type SidebarChannel struct {
+ ChannelId string `json:"channel_id"`
+ UserId string `json:"user_id"`
+ CategoryId string `json:"category_id"`
+ SortOrder int64 `json:"-"`
+}
+
+type SidebarChannels []*SidebarChannel
+type SidebarCategoriesWithChannels []*SidebarCategoryWithChannels
+
+func SidebarCategoryFromJson(data io.Reader) (*SidebarCategoryWithChannels, error) {
+ var o *SidebarCategoryWithChannels
+ err := json.NewDecoder(data).Decode(&o)
+ return o, err
+}
+
+func SidebarCategoriesFromJson(data io.Reader) ([]*SidebarCategoryWithChannels, error) {
+ var o []*SidebarCategoryWithChannels
+ err := json.NewDecoder(data).Decode(&o)
+ return o, err
+}
+
+func OrderedSidebarCategoriesFromJson(data io.Reader) (*OrderedSidebarCategories, error) {
+ var o *OrderedSidebarCategories
+ err := json.NewDecoder(data).Decode(&o)
+ return o, err
+}
+
+func (o SidebarCategoryWithChannels) ToJson() []byte {
+ b, _ := json.Marshal(o)
+ return b
+}
+
+func SidebarCategoriesWithChannelsToJson(o []*SidebarCategoryWithChannels) []byte {
+ if b, err := json.Marshal(o); err != nil {
+ return []byte("[]")
+ } else {
+ return b
+ }
+}
+
+func (o OrderedSidebarCategories) ToJson() []byte {
+ if b, err := json.Marshal(o); err != nil {
+ return []byte("[]")
+ } else {
+ return b
+ }
+}
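The sidebar helpers are plain JSON round-trips over these structs. A short sketch, with placeholder IDs, showing serialization and decoding:

    import (
        "bytes"
        "fmt"

        "github.com/mattermost/mattermost-server/v5/model"
    )

    func sidebarRoundTrip() {
        cat := model.SidebarCategoryWithChannels{
            SidebarCategory: model.SidebarCategory{
                UserId:      "user-id-placeholder",
                TeamId:      "team-id-placeholder",
                Type:        model.SidebarCategoryCustom,
                Sorting:     model.SidebarCategorySortManual,
                DisplayName: "Favorite projects",
            },
            Channels: []string{"channel-id-1", "channel-id-2"},
        }

        raw := cat.ToJson()
        back, err := model.SidebarCategoryFromJson(bytes.NewReader(raw))
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(back.DisplayName, back.Channels)
    }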
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/client4.go b/vendor/github.com/mattermost/mattermost-server/v5/model/client4.go
index b522ecb8..b3c34c39 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/client4.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/client4.go
@@ -10,6 +10,7 @@ import (
"io"
"io/ioutil"
"mime/multipart"
+ "net"
"net/http"
"net/url"
"strconv"
@@ -61,6 +62,40 @@ type Client4 struct {
AuthToken string
AuthType string
HttpHeader map[string]string // Headers to be copied over for each request
+
+ // trueString is the string value sent to the server for true boolean query parameters.
+ trueString string
+
+ // falseString is the string value sent to the server for false boolean query parameters.
+ falseString string
+}
+
+// SetBoolString is a helper method for overriding how true and false query string parameters are
+// sent to the server.
+//
+// This method is only exposed for testing. It is never necessary to configure these values
+// in production.
+func (c *Client4) SetBoolString(value bool, valueStr string) {
+ if value {
+ c.trueString = valueStr
+ } else {
+ c.falseString = valueStr
+ }
+}
+
+// boolString builds the query string parameter for boolean values.
+func (c *Client4) boolString(value bool) string {
+ if value && c.trueString != "" {
+ return c.trueString
+ } else if !value && c.falseString != "" {
+ return c.falseString
+ }
+
+ if value {
+ return "true"
+ } else {
+ return "false"
+ }
}
func closeBody(r *http.Response) {
@@ -81,7 +116,21 @@ func (c *Client4) Must(result interface{}, resp *Response) interface{} {
}
func NewAPIv4Client(url string) *Client4 {
- return &Client4{url, url + API_URL_SUFFIX, &http.Client{}, "", "", map[string]string{}}
+ url = strings.TrimRight(url, "/")
+ return &Client4{url, url + API_URL_SUFFIX, &http.Client{}, "", "", map[string]string{}, "", ""}
+}
+
+func NewAPIv4SocketClient(socketPath string) *Client4 {
+ tr := &http.Transport{
+ Dial: func(network, addr string) (net.Conn, error) {
+ return net.Dial("unix", socketPath)
+ },
+ }
+
+ client := NewAPIv4Client("http://_")
+ client.HttpClient = &http.Client{Transport: tr}
+
+ return client
}
func BuildErrorResponse(r *http.Response, err *AppError) *Response {
@@ -140,6 +189,10 @@ func (c *Client4) GetUserRoute(userId string) string {
return fmt.Sprintf(c.GetUsersRoute()+"/%v", userId)
}
+func (c *Client4) GetUserCategoryRoute(userID, teamID string) string {
+ return c.GetUserRoute(userID) + c.GetTeamRoute(teamID) + "/channels/categories"
+}
+
func (c *Client4) GetUserAccessTokensRoute() string {
return fmt.Sprintf(c.GetUsersRoute() + "/tokens")
}
@@ -261,6 +314,14 @@ func (c *Client4) GetFileRoute(fileId string) string {
return fmt.Sprintf(c.GetFilesRoute()+"/%v", fileId)
}
+func (c *Client4) GetUploadsRoute() string {
+ return "/uploads"
+}
+
+func (c *Client4) GetUploadRoute(uploadId string) string {
+ return fmt.Sprintf("%s/%s", c.GetUploadsRoute(), uploadId)
+}
+
func (c *Client4) GetPluginsRoute() string {
return "/plugins"
}
@@ -453,6 +514,10 @@ func (c *Client4) GetGroupsRoute() string {
return "/groups"
}
+func (c *Client4) GetPublishUserTypingRoute(userId string) string {
+ return c.GetUserRoute(userId) + "/typing"
+}
+
func (c *Client4) GetGroupRoute(groupID string) string {
return fmt.Sprintf("%s/%s", c.GetGroupsRoute(), groupID)
}
@@ -650,7 +715,7 @@ func (c *Client4) LoginByLdap(loginId string, password string) (*User, *Response
m := make(map[string]string)
m["login_id"] = loginId
m["password"] = password
- m["ldap_only"] = "true"
+ m["ldap_only"] = c.boolString(true)
return c.login(m)
}
@@ -967,6 +1032,17 @@ func (c *Client4) GetUsersWithoutTeam(page int, perPage int, etag string) ([]*Us
return UserListFromJson(r.Body), BuildResponse(r)
}
+// GetUsersInGroup returns a page of users in a group. Page counting starts at 0.
+func (c *Client4) GetUsersInGroup(groupID string, page int, perPage int, etag string) ([]*User, *Response) {
+ query := fmt.Sprintf("?in_group=%v&page=%v&per_page=%v", groupID, page, perPage)
+ r, err := c.DoApiGet(c.GetUsersRoute()+query, etag)
+ if err != nil {
+ return nil, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return UserListFromJson(r.Body), BuildResponse(r)
+}
+
// GetUsersByIds returns a list of users based on the provided user ids.
func (c *Client4) GetUsersByIds(userIds []string) ([]*User, *Response) {
r, err := c.DoApiPost(c.GetUsersRoute()+"/ids", ArrayToJson(userIds))
@@ -1119,6 +1195,17 @@ func (c *Client4) UpdateUserPassword(userId, currentPassword, newPassword string
return CheckStatusOK(r), BuildResponse(r)
}
+// UpdateUserHashedPassword updates a user's password with an already-hashed password. Must be a system administrator.
+func (c *Client4) UpdateUserHashedPassword(userId, newHashedPassword string) (bool, *Response) {
+ requestBody := map[string]string{"already_hashed": "true", "new_password": newHashedPassword}
+ r, err := c.DoApiPut(c.GetUserRoute(userId)+"/password", MapToJson(requestBody))
+ if err != nil {
+ return false, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+}
+
// PromoteGuestToUser converts a guest into a regular user.
func (c *Client4) PromoteGuestToUser(guestId string) (bool, *Response) {
r, err := c.DoApiPost(c.GetUserRoute(guestId)+"/promote", "")
@@ -1173,6 +1260,50 @@ func (c *Client4) DeleteUser(userId string) (bool, *Response) {
return CheckStatusOK(r), BuildResponse(r)
}
+// PermanentDeleteUser permanently deletes a user in the system based on the provided user id string.
+func (c *Client4) PermanentDeleteUser(userId string) (bool, *Response) {
+ r, err := c.DoApiDelete(c.GetUserRoute(userId) + "?permanent=" + c.boolString(true))
+ if err != nil {
+ return false, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+}
+
+// ConvertUserToBot converts a user to a bot user.
+func (c *Client4) ConvertUserToBot(userId string) (*Bot, *Response) {
+ r, err := c.DoApiPost(c.GetUserRoute(userId)+"/convert_to_bot", "")
+ if err != nil {
+ return nil, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return BotFromJson(r.Body), BuildResponse(r)
+}
+
+// ConvertBotToUser converts a bot user to a user.
+func (c *Client4) ConvertBotToUser(userId string, userPatch *UserPatch, setSystemAdmin bool) (*User, *Response) {
+ var query string
+ if setSystemAdmin {
+ query = "?set_system_admin=true"
+ }
+ r, err := c.DoApiPost(c.GetBotRoute(userId)+"/convert_to_user"+query, userPatch.ToJson())
+ if err != nil {
+ return nil, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return UserFromJson(r.Body), BuildResponse(r)
+}
+
+// PermanentDeleteAllUsers permanently deletes all users in the system. This is a local-only endpoint.
+func (c *Client4) PermanentDeleteAllUsers() (bool, *Response) {
+ r, err := c.DoApiDelete(c.GetUsersRoute())
+ if err != nil {
+ return false, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+}
+
// SendPasswordResetEmail will send a link for password resetting to a user with the
// provided email.
func (c *Client4) SendPasswordResetEmail(email string) (bool, *Response) {
@@ -1287,6 +1418,16 @@ func (c *Client4) VerifyUserEmail(token string) (bool, *Response) {
return CheckStatusOK(r), BuildResponse(r)
}
+// VerifyUserEmailWithoutToken will verify a user's email by its Id. (Requires manage system role)
+func (c *Client4) VerifyUserEmailWithoutToken(userId string) (*User, *Response) {
+ r, err := c.DoApiPost(c.GetUserRoute(userId)+"/email/verify/member", "")
+ if err != nil {
+ return nil, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return UserFromJson(r.Body), BuildResponse(r)
+}
+
// SendVerificationEmail will send an email to the user with the provided email address, if
// that user exists. The email will contain a link that can be used to verify the user's
// email address.
@@ -1487,7 +1628,7 @@ func (c *Client4) GetBot(userId string, etag string) (*Bot, *Response) {
// GetBotIncludeDeleted fetches the given bot, even if it is deleted.
func (c *Client4) GetBotIncludeDeleted(userId string, etag string) (*Bot, *Response) {
- r, err := c.DoApiGet(c.GetBotRoute(userId)+"?include_deleted=true", etag)
+ r, err := c.DoApiGet(c.GetBotRoute(userId)+"?include_deleted="+c.boolString(true), etag)
if err != nil {
return nil, BuildErrorResponse(r, err)
}
@@ -1508,7 +1649,7 @@ func (c *Client4) GetBots(page, perPage int, etag string) ([]*Bot, *Response) {
// GetBotsIncludeDeleted fetches the given page of bots, including deleted.
func (c *Client4) GetBotsIncludeDeleted(page, perPage int, etag string) ([]*Bot, *Response) {
- query := fmt.Sprintf("?page=%v&per_page=%v&include_deleted=true", page, perPage)
+ query := fmt.Sprintf("?page=%v&per_page=%v&include_deleted="+c.boolString(true), page, perPage)
r, err := c.DoApiGet(c.GetBotsRoute()+query, etag)
if err != nil {
return nil, BuildErrorResponse(r, err)
@@ -1519,7 +1660,7 @@ func (c *Client4) GetBotsIncludeDeleted(page, perPage int, etag string) ([]*Bot,
// GetBotsOrphaned fetches the given page of bots, only including orphaned bots.
func (c *Client4) GetBotsOrphaned(page, perPage int, etag string) ([]*Bot, *Response) {
- query := fmt.Sprintf("?page=%v&per_page=%v&only_orphaned=true", page, perPage)
+ query := fmt.Sprintf("?page=%v&per_page=%v&only_orphaned="+c.boolString(true), page, perPage)
r, err := c.DoApiGet(c.GetBotsRoute()+query, etag)
if err != nil {
return nil, BuildErrorResponse(r, err)
@@ -1659,7 +1800,7 @@ func (c *Client4) GetAllTeams(etag string, page int, perPage int) ([]*Team, *Res
// GetAllTeamsWithTotalCount returns all teams based on permissions.
func (c *Client4) GetAllTeamsWithTotalCount(etag string, page int, perPage int) ([]*Team, int64, *Response) {
- query := fmt.Sprintf("?page=%v&per_page=%v&include_total_count=true", page, perPage)
+ query := fmt.Sprintf("?page=%v&per_page=%v&include_total_count="+c.boolString(true), page, perPage)
r, err := c.DoApiGet(c.GetTeamsRoute()+query, etag)
if err != nil {
return nil, 0, BuildErrorResponse(r, err)
@@ -1811,7 +1952,7 @@ func (c *Client4) SoftDeleteTeam(teamId string) (bool, *Response) {
// PermanentDeleteTeam deletes the team; it should only be used when needed for
// compliance and the like.
func (c *Client4) PermanentDeleteTeam(teamId string) (bool, *Response) {
- r, err := c.DoApiDelete(c.GetTeamRoute(teamId) + "?permanent=true")
+ r, err := c.DoApiDelete(c.GetTeamRoute(teamId) + "?permanent=" + c.boolString(true))
if err != nil {
return false, BuildErrorResponse(r, err)
}
@@ -1931,7 +2072,7 @@ func (c *Client4) AddTeamMembersGracefully(teamId string, userIds []string) ([]*
members = append(members, member)
}
- r, err := c.DoApiPost(c.GetTeamMembersRoute(teamId)+"/batch?graceful=true", TeamMembersToJson(members))
+ r, err := c.DoApiPost(c.GetTeamMembersRoute(teamId)+"/batch?graceful="+c.boolString(true), TeamMembersToJson(members))
if err != nil {
return nil, BuildErrorResponse(r, err)
}
@@ -2049,7 +2190,7 @@ func (c *Client4) InviteGuestsToTeam(teamId string, userEmails []string, channel
// InviteUsersToTeamGracefully invites users by email to the team.
func (c *Client4) InviteUsersToTeamGracefully(teamId string, userEmails []string) ([]*EmailInviteWithError, *Response) {
- r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/invite/email?graceful=true", ArrayToJson(userEmails))
+ r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/invite/email?graceful="+c.boolString(true), ArrayToJson(userEmails))
if err != nil {
return nil, BuildErrorResponse(r, err)
}
@@ -2064,7 +2205,7 @@ func (c *Client4) InviteGuestsToTeamGracefully(teamId string, userEmails []strin
Channels: channels,
Message: message,
}
- r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/invite-guests/email?graceful=true", guestsInvite.ToJson())
+ r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/invite-guests/email?graceful="+c.boolString(true), guestsInvite.ToJson())
if err != nil {
return nil, BuildErrorResponse(r, err)
}
@@ -2163,7 +2304,16 @@ func (c *Client4) RemoveTeamIcon(teamId string) (bool, *Response) {
// GetAllChannels gets all the channels. Must be a system administrator.
func (c *Client4) GetAllChannels(page int, perPage int, etag string) (*ChannelListWithTeamData, *Response) {
- query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+ return c.getAllChannels(page, perPage, etag, false)
+}
+
+// GetAllChannelsIncludeDeleted gets all the channels, including deleted ones. Must be a system administrator.
+func (c *Client4) GetAllChannelsIncludeDeleted(page int, perPage int, etag string) (*ChannelListWithTeamData, *Response) {
+ return c.getAllChannels(page, perPage, etag, true)
+}
+
+func (c *Client4) getAllChannels(page int, perPage int, etag string, includeDeleted bool) (*ChannelListWithTeamData, *Response) {
+ query := fmt.Sprintf("?page=%v&per_page=%v&include_deleted=%v", page, perPage, includeDeleted)
r, err := c.DoApiGet(c.GetChannelsRoute()+query, etag)
if err != nil {
return nil, BuildErrorResponse(r, err)
@@ -2174,7 +2324,7 @@ func (c *Client4) GetAllChannels(page int, perPage int, etag string) (*ChannelLi
// GetAllChannelsWithCount gets all the channels, including the total count. Must be a system administrator.
func (c *Client4) GetAllChannelsWithCount(page int, perPage int, etag string) (*ChannelListWithTeamData, int64, *Response) {
- query := fmt.Sprintf("?page=%v&per_page=%v&include_total_count=true", page, perPage)
+ query := fmt.Sprintf("?page=%v&per_page=%v&include_total_count="+c.boolString(true), page, perPage)
r, err := c.DoApiGet(c.GetChannelsRoute()+query, etag)
if err != nil {
return nil, 0, BuildErrorResponse(r, err)
@@ -2307,6 +2457,17 @@ func (c *Client4) GetPinnedPosts(channelId string, etag string) (*PostList, *Res
return PostListFromJson(r.Body), BuildResponse(r)
}
+// GetPrivateChannelsForTeam returns a list of private channels based on the provided team id string.
+func (c *Client4) GetPrivateChannelsForTeam(teamId string, page int, perPage int, etag string) ([]*Channel, *Response) {
+ query := fmt.Sprintf("/private?page=%v&per_page=%v", page, perPage)
+ r, err := c.DoApiGet(c.GetChannelsForTeamRoute(teamId)+query, etag)
+ if err != nil {
+ return nil, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return ChannelSliceFromJson(r.Body), BuildResponse(r)
+}
+
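Illustrative usage of the new GetPrivateChannelsForTeam wrapper, not part of the vendored diff; a minimal sketch assuming an already authenticated *model.Client4 and a hypothetical team id:

package example

import "github.com/mattermost/mattermost-server/v5/model"

// listPrivateChannels fetches the first page (up to 60 entries) of the team's
// private channels. The caller must already be authenticated with sufficient
// permissions.
func listPrivateChannels(c *model.Client4, teamId string) ([]*model.Channel, error) {
	channels, resp := c.GetPrivateChannelsForTeam(teamId, 0, 60, "")
	if resp.Error != nil {
		return nil, resp.Error
	}
	return channels, nil
}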
// GetPublicChannelsForTeam returns a list of public channels based on the provided team id string.
func (c *Client4) GetPublicChannelsForTeam(teamId string, page int, perPage int, etag string) ([]*Channel, *Response) {
query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
@@ -2349,6 +2510,18 @@ func (c *Client4) GetChannelsForTeamForUser(teamId, userId string, includeDelete
return ChannelSliceFromJson(r.Body), BuildResponse(r)
}
+// GetChannelsForTeamAndUserWithLastDeleteAt returns a list of channels of a team for a user, additionally filtered by lastDeleteAt. The lastDeleteAt filter has no effect if includeDeleted is set to false.
+func (c *Client4) GetChannelsForTeamAndUserWithLastDeleteAt(teamId, userId string, includeDeleted bool, lastDeleteAt int, etag string) ([]*Channel, *Response) {
+ route := c.GetUserRoute(userId) + c.GetTeamRoute(teamId) + "/channels"
+ route += fmt.Sprintf("?include_deleted=%v&last_delete_at=%d", includeDeleted, lastDeleteAt)
+ r, err := c.DoApiGet(route, etag)
+ if err != nil {
+ return nil, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return ChannelSliceFromJson(r.Body), BuildResponse(r)
+}
+
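A sketch of the lastDeleteAt variant, again assuming an authenticated client; the since value is a hypothetical timestamp (presumably milliseconds, as elsewhere in the API):

package example

import "github.com/mattermost/mattermost-server/v5/model"

// channelsWithRecentlyDeleted lists the user's channels on a team, keeping
// channels deleted after since. includeDeleted must be true for the filter
// to have any effect, per the doc comment above.
func channelsWithRecentlyDeleted(c *model.Client4, teamId, userId string, since int) ([]*model.Channel, error) {
	channels, resp := c.GetChannelsForTeamAndUserWithLastDeleteAt(teamId, userId, true, since, "")
	if resp.Error != nil {
		return nil, resp.Error
	}
	return channels, nil
}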
// SearchChannels returns the channels on a team matching the provided search term.
func (c *Client4) SearchChannels(teamId string, search *ChannelSearch) ([]*Channel, *Response) {
r, err := c.DoApiPost(c.GetChannelsForTeamRoute(teamId)+"/search", search.ToJson())
@@ -2409,6 +2582,30 @@ func (c *Client4) DeleteChannel(channelId string) (bool, *Response) {
return CheckStatusOK(r), BuildResponse(r)
}
+// PermanentDeleteChannel deletes a channel based on the provided channel id string.
+func (c *Client4) PermanentDeleteChannel(channelId string) (bool, *Response) {
+ r, err := c.DoApiDelete(c.GetChannelRoute(channelId) + "?permanent=" + c.boolString(true))
+ if err != nil {
+ return false, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+}
+
+// MoveChannel moves the channel to the destination team.
+func (c *Client4) MoveChannel(channelId, teamId string, force bool) (*Channel, *Response) {
+ requestBody := map[string]interface{}{
+ "team_id": teamId,
+ "force": force,
+ }
+ r, err := c.DoApiPost(c.GetChannelRoute(channelId)+"/move", StringInterfaceToJson(requestBody))
+ if err != nil {
+ return nil, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return ChannelFromJson(r.Body), BuildResponse(r)
+}
+
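Illustrative usage of the two new channel-management calls; a sketch with hypothetical ids, assuming an authenticated system-admin client:

package example

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

// moveChannel relocates a channel to another team. force=false assumes the
// server may refuse moves that would strand members who are not on the
// destination team (the exact semantics are server-side and not shown here).
func moveChannel(c *model.Client4, channelId, destTeamId string) error {
	moved, resp := c.MoveChannel(channelId, destTeamId, false)
	if resp.Error != nil {
		return resp.Error
	}
	fmt.Printf("channel %s now belongs to team %s\n", moved.Id, moved.TeamId)
	return nil
}

// wipeChannel permanently deletes a channel; the server presumably gates this
// behind the EnableAPIChannelDeletion setting introduced later in this patch.
func wipeChannel(c *model.Client4, channelId string) error {
	if ok, resp := c.PermanentDeleteChannel(channelId); !ok {
		if resp.Error != nil {
			return resp.Error
		}
		return fmt.Errorf("permanent delete was not acknowledged")
	}
	return nil
}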
// GetChannelByName returns a channel based on the provided channel name and team id strings.
func (c *Client4) GetChannelByName(channelName, teamId string, etag string) (*Channel, *Response) {
r, err := c.DoApiGet(c.GetChannelByNameRoute(channelName, teamId), etag)
@@ -2421,7 +2618,7 @@ func (c *Client4) GetChannelByName(channelName, teamId string, etag string) (*Ch
// GetChannelByNameIncludeDeleted returns a channel based on the provided channel name and team id strings. Unlike GetChannelByName, it will also return deleted channels.
func (c *Client4) GetChannelByNameIncludeDeleted(channelName, teamId string, etag string) (*Channel, *Response) {
- r, err := c.DoApiGet(c.GetChannelByNameRoute(channelName, teamId)+"?include_deleted=true", etag)
+ r, err := c.DoApiGet(c.GetChannelByNameRoute(channelName, teamId)+"?include_deleted="+c.boolString(true), etag)
if err != nil {
return nil, BuildErrorResponse(r, err)
}
@@ -2441,7 +2638,7 @@ func (c *Client4) GetChannelByNameForTeamName(channelName, teamName string, etag
// GetChannelByNameForTeamNameIncludeDeleted returns a channel based on the provided channel name and team name strings. Unlike GetChannelByNameForTeamName, it will also return deleted channels.
func (c *Client4) GetChannelByNameForTeamNameIncludeDeleted(channelName, teamName string, etag string) (*Channel, *Response) {
- r, err := c.DoApiGet(c.GetChannelByNameForTeamNameRoute(channelName, teamName)+"?include_deleted=true", etag)
+ r, err := c.DoApiGet(c.GetChannelByNameForTeamNameRoute(channelName, teamName)+"?include_deleted="+c.boolString(true), etag)
if err != nil {
return nil, BuildErrorResponse(r, err)
}
@@ -3059,7 +3256,7 @@ func (c *Client4) GetPing() (string, *Response) {
// GetPingWithServerStatus will return ok if several basic server health checks
// all pass successfully.
func (c *Client4) GetPingWithServerStatus() (string, *Response) {
- r, err := c.DoApiGet(c.GetSystemRoute()+"/ping?get_server_status=true", "")
+ r, err := c.DoApiGet(c.GetSystemRoute()+"/ping?get_server_status="+c.boolString(true), "")
if r != nil && r.StatusCode == 500 {
defer r.Body.Close()
return STATUS_UNHEALTHY, BuildErrorResponse(r, err)
@@ -3187,6 +3384,19 @@ func (c *Client4) UpdateConfig(config *Config) (*Config, *Response) {
return ConfigFromJson(r.Body), BuildResponse(r)
}
+// MigrateConfig will migrate the existing config to the new one.
+func (c *Client4) MigrateConfig(from, to string) (bool, *Response) {
+ m := make(map[string]string, 2)
+ m["from"] = from
+ m["to"] = to
+ r, err := c.DoApiPost(c.GetConfigRoute()+"/migrate", MapToJson(m))
+ if err != nil {
+ return false, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return true, BuildResponse(r)
+}
+
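A sketch of MigrateConfig; the from/to arguments are assumed to be config-store locations (for example a file path and a database DSN), and both values below are hypothetical:

package example

import "github.com/mattermost/mattermost-server/v5/model"

// migrateConfig asks the server to copy its configuration from one store to
// another. Adjust the source and destination for a real deployment.
func migrateConfig(c *model.Client4) error {
	_, resp := c.MigrateConfig("config.json", "mysql://mmuser:password@tcp(localhost:3306)/mattermost")
	if resp.Error != nil {
		return resp.Error
	}
	return nil
}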
// UploadLicenseFile will add a license file to the system.
func (c *Client4) UploadLicenseFile(data []byte) (bool, *Response) {
body := &bytes.Buffer{}
@@ -3470,7 +3680,7 @@ func (c *Client4) GetSamlMetadata() (string, *Response) {
return buf.String(), BuildResponse(r)
}
-func samlFileToMultipart(data []byte, filename string) ([]byte, *multipart.Writer, error) {
+func fileToMultipart(data []byte, filename string) ([]byte, *multipart.Writer, error) {
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
@@ -3493,7 +3703,7 @@ func samlFileToMultipart(data []byte, filename string) ([]byte, *multipart.Write
// UploadSamlIdpCertificate will upload an IDP certificate for SAML and set the config to use it.
// The filename parameter is deprecated and ignored: the server will pick a hard-coded filename when writing to disk.
func (c *Client4) UploadSamlIdpCertificate(data []byte, filename string) (bool, *Response) {
- body, writer, err := samlFileToMultipart(data, filename)
+ body, writer, err := fileToMultipart(data, filename)
if err != nil {
return false, &Response{Error: NewAppError("UploadSamlIdpCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)}
}
@@ -3505,7 +3715,7 @@ func (c *Client4) UploadSamlIdpCertificate(data []byte, filename string) (bool,
// UploadSamlPublicCertificate will upload a public certificate for SAML and set the config to use it.
// The filename parameter is deprecated and ignored: the server will pick a hard-coded filename when writing to disk.
func (c *Client4) UploadSamlPublicCertificate(data []byte, filename string) (bool, *Response) {
- body, writer, err := samlFileToMultipart(data, filename)
+ body, writer, err := fileToMultipart(data, filename)
if err != nil {
return false, &Response{Error: NewAppError("UploadSamlPublicCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)}
}
@@ -3517,7 +3727,7 @@ func (c *Client4) UploadSamlPublicCertificate(data []byte, filename string) (boo
// UploadSamlPrivateCertificate will upload a private key for SAML and set the config to use it.
// The filename parameter is deprecated and ignored: the server will pick a hard-coded filename when writing to disk.
func (c *Client4) UploadSamlPrivateCertificate(data []byte, filename string) (bool, *Response) {
- body, writer, err := samlFileToMultipart(data, filename)
+ body, writer, err := fileToMultipart(data, filename)
if err != nil {
return false, &Response{Error: NewAppError("UploadSamlPrivateCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)}
}
@@ -3685,7 +3895,19 @@ func (c *Client4) GetLdapGroups() ([]*Group, *Response) {
}
defer closeBody(r)
- return GroupsFromJson(r.Body), BuildResponse(r)
+ responseData := struct {
+ Count int `json:"count"`
+ Groups []*Group `json:"groups"`
+ }{}
+ if err := json.NewDecoder(r.Body).Decode(&responseData); err != nil {
+ appErr := NewAppError("Api4.GetLdapGroups", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+ return nil, BuildErrorResponse(r, appErr)
+ }
+ for i := range responseData.Groups {
+ responseData.Groups[i].DisplayName = *responseData.Groups[i].Name
+ }
+
+ return responseData.Groups, BuildResponse(r)
}
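The client now unwraps the {count, groups} envelope itself and copies each LDAP group's Name into DisplayName. A small usage sketch, assuming an authenticated admin client:

package example

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

// printLdapGroups lists the LDAP groups known to the server. DisplayName is
// populated client-side from the LDAP group name, per the hunk above.
func printLdapGroups(c *model.Client4) error {
	groups, resp := c.GetLdapGroups()
	if resp.Error != nil {
		return resp.Error
	}
	for _, g := range groups {
		fmt.Println(g.DisplayName)
	}
	return nil
}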
// LinkLdapGroup creates or undeletes a Mattermost group and associates it to the given LDAP group DN.
@@ -3714,6 +3936,18 @@ func (c *Client4) UnlinkLdapGroup(dn string) (*Group, *Response) {
return GroupFromJson(r.Body), BuildResponse(r)
}
+// MigrateIdLdap migrates the LDAP-enabled users to the given id attribute.
+func (c *Client4) MigrateIdLdap(toAttribute string) (bool, *Response) {
+ r, err := c.DoApiPost(c.GetLdapRoute()+"/migrateid", MapToJson(map[string]string{
+ "toAttribute": toAttribute,
+ }))
+ if err != nil {
+ return false, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+}
+
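A sketch of MigrateIdLdap; the attribute name is a hypothetical example and depends on the LDAP schema in use:

package example

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

// migrateLdapIdAttribute switches the attribute used to identify LDAP users.
// "objectGUID" is only an example; use whatever your directory provides.
func migrateLdapIdAttribute(c *model.Client4) error {
	ok, resp := c.MigrateIdLdap("objectGUID")
	if resp.Error != nil {
		return resp.Error
	}
	if !ok {
		return fmt.Errorf("LDAP id migration was not acknowledged")
	}
	return nil
}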
// GetGroupsByChannel retrieves the Mattermost Groups associated with a given channel
func (c *Client4) GetGroupsByChannel(channelId string, opts GroupSearchOpts) ([]*GroupWithSchemeAdmin, int, *Response) {
path := fmt.Sprintf("%s/groups?q=%v&include_member_count=%v&filter_allow_reference=%v", c.GetChannelRoute(channelId), opts.Q, opts.IncludeMemberCount, opts.FilterAllowReference)
@@ -3828,6 +4062,74 @@ func (c *Client4) GetGroupsByUserId(userId string) ([]*Group, *Response) {
return GroupsFromJson(r.Body), BuildResponse(r)
}
+func (c *Client4) MigrateAuthToLdap(fromAuthService string, matchField string, force bool) (bool, *Response) {
+ r, err := c.DoApiPost(c.GetUsersRoute()+"/migrate_auth/ldap", StringInterfaceToJson(map[string]interface{}{
+ "from": fromAuthService,
+ "force": force,
+ "match_field": matchField,
+ }))
+ if err != nil {
+ return false, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+}
+
+func (c *Client4) MigrateAuthToSaml(fromAuthService string, usersMap map[string]string, auto bool) (bool, *Response) {
+ r, err := c.DoApiPost(c.GetUsersRoute()+"/migrate_auth/saml", StringInterfaceToJson(map[string]interface{}{
+ "from": fromAuthService,
+ "auto": auto,
+ "matches": usersMap,
+ }))
+ if err != nil {
+ return false, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+}
+
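Sketches of the two auth-migration helpers with hypothetical arguments; the parameter semantics are inferred from the names only and are not spelled out in this hunk:

package example

import "github.com/mattermost/mattermost-server/v5/model"

// migrateEmailAuthToLdap moves users whose auth service is "email" over to
// LDAP, matching accounts on the "email" attribute. force=false is assumed to
// skip users that cannot be matched rather than failing the whole run.
func migrateEmailAuthToLdap(c *model.Client4) error {
	if _, resp := c.MigrateAuthToLdap("email", "email", false); resp.Error != nil {
		return resp.Error
	}
	return nil
}

// migrateEmailAuthToSaml does the same for SAML, using an explicit map of
// Mattermost login ids to SAML ids instead of automatic matching.
func migrateEmailAuthToSaml(c *model.Client4, matches map[string]string) error {
	if _, resp := c.MigrateAuthToSaml("email", matches, false); resp.Error != nil {
		return resp.Error
	}
	return nil
}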
+// UploadLdapPublicCertificate will upload a public certificate for LDAP and set the config to use it.
+func (c *Client4) UploadLdapPublicCertificate(data []byte) (bool, *Response) {
+ body, writer, err := fileToMultipart(data, LDAP_PUBIC_CERTIFICATE_NAME)
+ if err != nil {
+ return false, &Response{Error: NewAppError("UploadLdapPublicCertificate", "model.client.upload_ldap_cert.app_error", nil, err.Error(), http.StatusBadRequest)}
+ }
+
+ _, resp := c.DoUploadFile(c.GetLdapRoute()+"/certificate/public", body, writer.FormDataContentType())
+ return resp.Error == nil, resp
+}
+
+// UploadLdapPrivateCertificate will upload a private key for LDAP and set the config to use it.
+func (c *Client4) UploadLdapPrivateCertificate(data []byte) (bool, *Response) {
+ body, writer, err := fileToMultipart(data, LDAP_PRIVATE_KEY_NAME)
+ if err != nil {
+ return false, &Response{Error: NewAppError("UploadLdapPrivateCertificate", "model.client.upload_Ldap_cert.app_error", nil, err.Error(), http.StatusBadRequest)}
+ }
+
+ _, resp := c.DoUploadFile(c.GetLdapRoute()+"/certificate/private", body, writer.FormDataContentType())
+ return resp.Error == nil, resp
+}
+
+// DeleteLdapPublicCertificate deletes the LDAP IDP certificate from the server and updates the config to not use it and disable LDAP.
+func (c *Client4) DeleteLdapPublicCertificate() (bool, *Response) {
+ r, err := c.DoApiDelete(c.GetLdapRoute() + "/certificate/public")
+ if err != nil {
+ return false, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+}
+
+// DeleteLdapPrivateCertificate deletes the LDAP private key from the server and updates the config to not use it and disable LDAP.
+func (c *Client4) DeleteLdapPrivateCertificate() (bool, *Response) {
+ r, err := c.DoApiDelete(c.GetLdapRoute() + "/certificate/private")
+ if err != nil {
+ return false, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+}
+
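A usage sketch for the LDAP certificate endpoints; the file paths are hypothetical:

package example

import (
	"io/ioutil"

	"github.com/mattermost/mattermost-server/v5/model"
)

// installLdapCerts uploads a public certificate and a private key read from
// disk; the server stores them under the hard-coded names referenced by the
// constants used in the hunk above.
func installLdapCerts(c *model.Client4, pubCertPath, privKeyPath string) error {
	pub, err := ioutil.ReadFile(pubCertPath)
	if err != nil {
		return err
	}
	if _, resp := c.UploadLdapPublicCertificate(pub); resp.Error != nil {
		return resp.Error
	}
	key, err := ioutil.ReadFile(privKeyPath)
	if err != nil {
		return err
	}
	if _, resp := c.UploadLdapPrivateCertificate(key); resp.Error != nil {
		return resp.Error
	}
	return nil
}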
// Audits Section
// GetAudits returns a list of audits for the whole system.
@@ -4519,6 +4821,21 @@ func (c *Client4) CancelJob(jobId string) (bool, *Response) {
return CheckStatusOK(r), BuildResponse(r)
}
+// DownloadJob downloads the results of the given job.
+func (c *Client4) DownloadJob(jobId string) ([]byte, *Response) {
+ r, appErr := c.DoApiGet(c.GetJobsRoute()+fmt.Sprintf("/%v/download", jobId), "")
+ if appErr != nil {
+ return nil, BuildErrorResponse(r, appErr)
+ }
+ defer closeBody(r)
+
+ data, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ return nil, BuildErrorResponse(r, NewAppError("GetFile", "model.client.read_job_result_file.app_error", nil, err.Error(), r.StatusCode))
+ }
+ return data, BuildResponse(r)
+}
+
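A sketch of DownloadJob, writing the returned bytes to a hypothetical local path:

package example

import (
	"io/ioutil"

	"github.com/mattermost/mattermost-server/v5/model"
)

// saveJobResults downloads the artifact produced by a finished job and writes
// it to path with restrictive permissions, since exports can be sensitive.
func saveJobResults(c *model.Client4, jobId, path string) error {
	data, resp := c.DownloadJob(jobId)
	if resp.Error != nil {
		return resp.Error
	}
	return ioutil.WriteFile(path, data, 0600)
}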
// Roles Section
// GetRole gets a single role by ID.
@@ -4650,7 +4967,7 @@ func (c *Client4) uploadPlugin(file io.Reader, force bool) (*Manifest, *Response
writer := multipart.NewWriter(body)
if force {
- err := writer.WriteField("force", "true")
+ err := writer.WriteField("force", c.boolString(true))
if err != nil {
return nil, &Response{Error: NewAppError("UploadPlugin", "model.client.writer.app_error", nil, err.Error(), 0)}
}
@@ -4693,10 +5010,7 @@ func (c *Client4) uploadPlugin(file io.Reader, force bool) (*Manifest, *Response
}
func (c *Client4) InstallPluginFromUrl(downloadUrl string, force bool) (*Manifest, *Response) {
- forceStr := "false"
- if force {
- forceStr = "true"
- }
+ forceStr := c.boolString(force)
url := fmt.Sprintf("%s?plugin_download_url=%s&force=%s", c.GetPluginsRoute()+"/install_from_url", url.QueryEscape(downloadUrl), forceStr)
r, err := c.DoApiPost(url, "")
@@ -5074,6 +5388,16 @@ func (c *Client4) GetKnownUsers() ([]string, *Response) {
return userIds, BuildResponse(r)
}
+// PublishUserTyping publishes a "user is typing" websocket event based on the provided TypingRequest.
+func (c *Client4) PublishUserTyping(userID string, typingRequest TypingRequest) (bool, *Response) {
+ r, err := c.DoApiPost(c.GetPublishUserTypingRoute(userID), typingRequest.ToJson())
+ if err != nil {
+ return false, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return CheckStatusOK(r), BuildResponse(r)
+}
+
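A sketch of PublishUserTyping; the TypingRequest field name below is assumed from the model and is not shown in this hunk:

package example

import "github.com/mattermost/mattermost-server/v5/model"

// announceTyping tells the server that userId is typing in channelId so the
// event can be fanned out over websockets. ChannelId is an assumed field
// name; the model also appears to carry an optional parent post id for
// replies in threads.
func announceTyping(c *model.Client4, userId, channelId string) error {
	req := model.TypingRequest{ChannelId: channelId}
	if _, resp := c.PublishUserTyping(userId, req); resp.Error != nil {
		return resp.Error
	}
	return nil
}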
func (c *Client4) GetChannelMemberCountsByGroup(channelID string, includeTimezones bool, etag string) ([]*ChannelMemberCountByGroup, *Response) {
r, err := c.DoApiGet(c.GetChannelRoute(channelID)+"/member_counts_by_group?include_timezones="+strconv.FormatBool(includeTimezones), etag)
if err != nil {
@@ -5093,3 +5417,202 @@ func (c *Client4) RequestTrialLicense(users int) (bool, *Response) {
defer closeBody(r)
return CheckStatusOK(r), BuildResponse(r)
}
+
+// GetGroupStats retrieves stats for a Mattermost Group
+func (c *Client4) GetGroupStats(groupID string) (*GroupStats, *Response) {
+ r, appErr := c.DoApiGet(c.GetGroupRoute(groupID)+"/stats", "")
+ if appErr != nil {
+ return nil, BuildErrorResponse(r, appErr)
+ }
+ defer closeBody(r)
+ return GroupStatsFromJson(r.Body), BuildResponse(r)
+}
+
+func (c *Client4) GetSidebarCategoriesForTeamForUser(userID, teamID, etag string) (*OrderedSidebarCategories, *Response) {
+ route := c.GetUserCategoryRoute(userID, teamID)
+ r, appErr := c.DoApiGet(route, etag)
+ if appErr != nil {
+ return nil, BuildErrorResponse(r, appErr)
+ }
+ cat, err := OrderedSidebarCategoriesFromJson(r.Body)
+ if err != nil {
+ return nil, BuildErrorResponse(r, NewAppError("Client4.GetSidebarCategoriesForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode))
+ }
+ return cat, BuildResponse(r)
+}
+
+func (c *Client4) CreateSidebarCategoryForTeamForUser(userID, teamID string, category *SidebarCategoryWithChannels) (*SidebarCategoryWithChannels, *Response) {
+ payload, _ := json.Marshal(category)
+ route := c.GetUserCategoryRoute(userID, teamID)
+ r, appErr := c.doApiPostBytes(route, payload)
+ if appErr != nil {
+ return nil, BuildErrorResponse(r, appErr)
+ }
+ defer closeBody(r)
+ cat, err := SidebarCategoryFromJson(r.Body)
+ if err != nil {
+ return nil, BuildErrorResponse(r, NewAppError("Client4.CreateSidebarCategoryForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode))
+ }
+ return cat, BuildResponse(r)
+}
+
+func (c *Client4) UpdateSidebarCategoriesForTeamForUser(userID, teamID string, categories []*SidebarCategoryWithChannels) ([]*SidebarCategoryWithChannels, *Response) {
+ payload, _ := json.Marshal(categories)
+ route := c.GetUserCategoryRoute(userID, teamID)
+
+ r, appErr := c.doApiPutBytes(route, payload)
+ if appErr != nil {
+ return nil, BuildErrorResponse(r, appErr)
+ }
+ defer closeBody(r)
+
+ categories, err := SidebarCategoriesFromJson(r.Body)
+ if err != nil {
+ return nil, BuildErrorResponse(r, NewAppError("Client4.UpdateSidebarCategoriesForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode))
+ }
+
+ return categories, BuildResponse(r)
+}
+
+func (c *Client4) GetSidebarCategoryOrderForTeamForUser(userID, teamID, etag string) ([]string, *Response) {
+ route := c.GetUserCategoryRoute(userID, teamID) + "/order"
+ r, err := c.DoApiGet(route, etag)
+ if err != nil {
+ return nil, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return ArrayFromJson(r.Body), BuildResponse(r)
+}
+
+func (c *Client4) UpdateSidebarCategoryOrderForTeamForUser(userID, teamID string, order []string) ([]string, *Response) {
+ payload, _ := json.Marshal(order)
+ route := c.GetUserCategoryRoute(userID, teamID) + "/order"
+ r, err := c.doApiPutBytes(route, payload)
+ if err != nil {
+ return nil, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return ArrayFromJson(r.Body), BuildResponse(r)
+}
+
+func (c *Client4) GetSidebarCategoryForTeamForUser(userID, teamID, categoryID, etag string) (*SidebarCategoryWithChannels, *Response) {
+ route := c.GetUserCategoryRoute(userID, teamID) + "/" + categoryID
+ r, appErr := c.DoApiGet(route, etag)
+ if appErr != nil {
+ return nil, BuildErrorResponse(r, appErr)
+ }
+ defer closeBody(r)
+ cat, err := SidebarCategoryFromJson(r.Body)
+ if err != nil {
+ return nil, BuildErrorResponse(r, NewAppError("Client4.GetSidebarCategoryForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode))
+ }
+
+ return cat, BuildResponse(r)
+}
+
+func (c *Client4) UpdateSidebarCategoryForTeamForUser(userID, teamID, categoryID string, category *SidebarCategoryWithChannels) (*SidebarCategoryWithChannels, *Response) {
+ payload, _ := json.Marshal(category)
+ route := c.GetUserCategoryRoute(userID, teamID) + "/" + categoryID
+ r, appErr := c.doApiPutBytes(route, payload)
+ if appErr != nil {
+ return nil, BuildErrorResponse(r, appErr)
+ }
+ defer closeBody(r)
+ cat, err := SidebarCategoryFromJson(r.Body)
+ if err != nil {
+ return nil, BuildErrorResponse(r, NewAppError("Client4.UpdateSidebarCategoryForTeamForUser", "model.utils.decode_json.app_error", nil, err.Error(), r.StatusCode))
+ }
+
+ return cat, BuildResponse(r)
+}
+
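A small sketch combining the category-order getter and setter, assuming an authenticated client and that the order returned is a slice of category ids:

package example

import "github.com/mattermost/mattermost-server/v5/model"

// demoteFirstCategory moves the user's first sidebar category to the end of
// the ordering for the given team and returns the order the server saved.
func demoteFirstCategory(c *model.Client4, userId, teamId string) ([]string, error) {
	order, resp := c.GetSidebarCategoryOrderForTeamForUser(userId, teamId, "")
	if resp.Error != nil {
		return nil, resp.Error
	}
	if len(order) < 2 {
		return order, nil
	}
	// Rotate: first category id goes to the end.
	newOrder := append(order[1:], order[0])
	saved, resp := c.UpdateSidebarCategoryOrderForTeamForUser(userId, teamId, newOrder)
	if resp.Error != nil {
		return nil, resp.Error
	}
	return saved, nil
}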
+// CheckIntegrity performs a database integrity check.
+func (c *Client4) CheckIntegrity() ([]IntegrityCheckResult, *Response) {
+ r, err := c.DoApiPost("/integrity", "")
+ if err != nil {
+ return nil, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ var results []IntegrityCheckResult
+ if err := json.NewDecoder(r.Body).Decode(&results); err != nil {
+ appErr := NewAppError("Api4.CheckIntegrity", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+ return nil, BuildErrorResponse(r, appErr)
+ }
+ return results, BuildResponse(r)
+}
+
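A sketch of CheckIntegrity; since IntegrityCheckResult's fields are not shown in this hunk, the results are simply dumped as-is:

package example

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

// runIntegrityCheck triggers a server-side database integrity check and
// prints whatever results come back.
func runIntegrityCheck(c *model.Client4) error {
	results, resp := c.CheckIntegrity()
	if resp.Error != nil {
		return resp.Error
	}
	for i, r := range results {
		fmt.Printf("check %d: %+v\n", i, r)
	}
	return nil
}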
+func (c *Client4) GetNotices(lastViewed int64, teamId string, client NoticeClientType, clientVersion, locale, etag string) (NoticeMessages, *Response) {
+ url := fmt.Sprintf("/system/notices/%s?lastViewed=%d&client=%s&clientVersion=%s&locale=%s", teamId, lastViewed, client, clientVersion, locale)
+ r, appErr := c.DoApiGet(url, etag)
+ if appErr != nil {
+ return nil, BuildErrorResponse(r, appErr)
+ }
+ defer closeBody(r)
+ notices, err := UnmarshalProductNoticeMessages(r.Body)
+ if err != nil {
+ return nil, &Response{StatusCode: http.StatusBadRequest, Error: NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), http.StatusForbidden)}
+ }
+ return notices, BuildResponse(r)
+}
+
+func (c *Client4) MarkNoticesViewed(ids []string) *Response {
+ r, err := c.DoApiPut("/system/notices/view", ArrayToJson(ids))
+ if err != nil {
+ return BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return BuildResponse(r)
+}
+
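A sketch for the product-notice endpoints; the client type, version, and locale values are illustrative, NoticeClientType is assumed to be a string-based type (it is formatted with %s above), and NoticeMessages is assumed to be a slice:

package example

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

// syncProductNotices fetches the notices relevant to this client and marks
// the ones the caller has already displayed as viewed.
func syncProductNotices(c *model.Client4, teamId string, alreadyShown []string) error {
	notices, resp := c.GetNotices(0, teamId, model.NoticeClientType("web"), "5.28.0", "en", "")
	if resp.Error != nil {
		return resp.Error
	}
	fmt.Printf("server returned %d product notices\n", len(notices))
	if len(alreadyShown) > 0 {
		if resp := c.MarkNoticesViewed(alreadyShown); resp.Error != nil {
			return resp.Error
		}
	}
	return nil
}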
+// CreateUpload creates a new upload session.
+func (c *Client4) CreateUpload(us *UploadSession) (*UploadSession, *Response) {
+ r, err := c.DoApiPost(c.GetUploadsRoute(), us.ToJson())
+ if err != nil {
+ return nil, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return UploadSessionFromJson(r.Body), BuildResponse(r)
+}
+
+// GetUpload returns the upload session for the specified uploadId.
+func (c *Client4) GetUpload(uploadId string) (*UploadSession, *Response) {
+ r, err := c.DoApiGet(c.GetUploadRoute(uploadId), "")
+ if err != nil {
+ return nil, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return UploadSessionFromJson(r.Body), BuildResponse(r)
+}
+
+// GetUploadsForUser returns the upload sessions created by the specified
+// userId.
+func (c *Client4) GetUploadsForUser(userId string) ([]*UploadSession, *Response) {
+ r, err := c.DoApiGet(c.GetUserRoute(userId)+"/uploads", "")
+ if err != nil {
+ return nil, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return UploadSessionsFromJson(r.Body), BuildResponse(r)
+}
+
+// UploadData performs an upload. On success it returns
+// a FileInfo object.
+func (c *Client4) UploadData(uploadId string, data io.Reader) (*FileInfo, *Response) {
+ url := c.GetUploadRoute(uploadId)
+ r, err := c.doApiRequestReader("POST", c.ApiUrl+url, data, "")
+ if err != nil {
+ return nil, BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return FileInfoFromJson(r.Body), BuildResponse(r)
+}
+
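A resumable-upload sketch built only from the calls added above; it assumes a session was created earlier (for example via CreateUpload) and resumes the first one found for the user:

package example

import (
	"bytes"
	"errors"

	"github.com/mattermost/mattermost-server/v5/model"
)

// resumeFirstUpload streams data into the user's first pending upload session
// and returns the FileInfo the server reports on completion.
func resumeFirstUpload(c *model.Client4, userId string, data []byte) (*model.FileInfo, error) {
	sessions, resp := c.GetUploadsForUser(userId)
	if resp.Error != nil {
		return nil, resp.Error
	}
	if len(sessions) == 0 {
		return nil, errors.New("no pending upload session to resume")
	}
	info, resp := c.UploadData(sessions[0].Id, bytes.NewReader(data))
	if resp.Error != nil {
		return nil, resp.Error
	}
	return info, nil
}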
+func (c *Client4) UpdatePassword(userId, currentPassword, newPassword string) *Response {
+ requestBody := map[string]string{"current_password": currentPassword, "new_password": newPassword}
+ r, err := c.DoApiPut(c.GetUserRoute(userId)+"/password", MapToJson(requestBody))
+ if err != nil {
+ return BuildErrorResponse(r, err)
+ }
+ defer closeBody(r)
+ return BuildResponse(r)
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_message.go b/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_message.go
index 86113d78..529f4a93 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_message.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/cluster_message.go
@@ -43,6 +43,16 @@ const (
CLUSTER_EVENT_INVALIDATE_CACHE_FOR_TERMS_OF_SERVICE = "inv_terms_of_service"
CLUSTER_EVENT_BUSY_STATE_CHANGED = "busy_state_change"
+ // Gossip communication
+ CLUSTER_GOSSIP_EVENT_REQUEST_GET_LOGS = "gossip_request_get_logs"
+ CLUSTER_GOSSIP_EVENT_RESPONSE_GET_LOGS = "gossip_response_get_logs"
+ CLUSTER_GOSSIP_EVENT_REQUEST_GET_CLUSTER_STATS = "gossip_request_cluster_stats"
+ CLUSTER_GOSSIP_EVENT_RESPONSE_GET_CLUSTER_STATS = "gossip_response_cluster_stats"
+ CLUSTER_GOSSIP_EVENT_REQUEST_GET_PLUGIN_STATUSES = "gossip_request_plugin_statuses"
+ CLUSTER_GOSSIP_EVENT_RESPONSE_GET_PLUGIN_STATUSES = "gossip_response_plugin_statuses"
+ CLUSTER_GOSSIP_EVENT_REQUEST_SAVE_CONFIG = "gossip_request_save_config"
+ CLUSTER_GOSSIP_EVENT_RESPONSE_SAVE_CONFIG = "gossip_response_save_config"
+
// SendTypes for ClusterMessage.
CLUSTER_SEND_BEST_EFFORT = "best_effort"
CLUSTER_SEND_RELIABLE = "reliable"
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/command.go b/vendor/github.com/mattermost/mattermost-server/v5/model/command.go
index 6dcf52ae..0013046b 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/command.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/command.go
@@ -18,23 +18,26 @@ const (
)
type Command struct {
- Id string `json:"id"`
- Token string `json:"token"`
- CreateAt int64 `json:"create_at"`
- UpdateAt int64 `json:"update_at"`
- DeleteAt int64 `json:"delete_at"`
- CreatorId string `json:"creator_id"`
- TeamId string `json:"team_id"`
- Trigger string `json:"trigger"`
- Method string `json:"method"`
- Username string `json:"username"`
- IconURL string `json:"icon_url"`
- AutoComplete bool `json:"auto_complete"`
- AutoCompleteDesc string `json:"auto_complete_desc"`
- AutoCompleteHint string `json:"auto_complete_hint"`
- DisplayName string `json:"display_name"`
- Description string `json:"description"`
- URL string `json:"url"`
+ Id string `json:"id"`
+ Token string `json:"token"`
+ CreateAt int64 `json:"create_at"`
+ UpdateAt int64 `json:"update_at"`
+ DeleteAt int64 `json:"delete_at"`
+ CreatorId string `json:"creator_id"`
+ TeamId string `json:"team_id"`
+ Trigger string `json:"trigger"`
+ Method string `json:"method"`
+ Username string `json:"username"`
+ IconURL string `json:"icon_url"`
+ AutoComplete bool `json:"auto_complete"`
+ AutoCompleteDesc string `json:"auto_complete_desc"`
+ AutoCompleteHint string `json:"auto_complete_hint"`
+ DisplayName string `json:"display_name"`
+ Description string `json:"description"`
+ URL string `json:"url"`
+ // PluginId records the id of the plugin that created this Command. If it is blank, the Command
+ // was not created by a plugin.
+ PluginId string `json:"plugin_id"`
AutocompleteData *AutocompleteData `db:"-" json:"autocomplete_data,omitempty"`
// AutocompleteIconData is a base64 encoded svg
AutocompleteIconData string `db:"-" json:"autocomplete_icon_data,omitempty"`
@@ -80,10 +83,20 @@ func (o *Command) IsValid() *AppError {
return NewAppError("Command.IsValid", "model.command.is_valid.update_at.app_error", nil, "", http.StatusBadRequest)
}
- if !IsValidId(o.CreatorId) {
+ // If the CreatorId is blank, this should be a command created by a plugin.
+ if o.CreatorId == "" && !IsValidPluginId(o.PluginId) {
+ return NewAppError("Command.IsValid", "model.command.is_valid.plugin_id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ // If the PluginId is blank, this should be a command associated with a userId.
+ if o.PluginId == "" && !IsValidId(o.CreatorId) {
return NewAppError("Command.IsValid", "model.command.is_valid.user_id.app_error", nil, "", http.StatusBadRequest)
}
+ if o.CreatorId != "" && o.PluginId != "" {
+ return NewAppError("Command.IsValid", "model.command.is_valid.plugin_id.app_error", nil, "command cannot have both a CreatorId and a PluginId", http.StatusBadRequest)
+ }
+
if !IsValidId(o.TeamId) {
return NewAppError("Command.IsValid", "model.command.is_valid.team_id.app_error", nil, "", http.StatusBadRequest)
}
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/command_args.go b/vendor/github.com/mattermost/mattermost-server/v5/model/command_args.go
index a3bbb4c9..15a6372a 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/command_args.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/command_args.go
@@ -20,9 +20,11 @@ type CommandArgs struct {
Command string `json:"command"`
SiteURL string `json:"-"`
T goi18n.TranslateFunc `json:"-"`
- Session Session `json:"-"`
UserMentions UserMentionMap `json:"-"`
ChannelMentions ChannelMentionMap `json:"-"`
+
+ // DO NOT USE: the Session field is deprecated. MM-26398
+ Session Session `json:"-"`
}
func (o *CommandArgs) ToJson() string {
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/config.go b/vendor/github.com/mattermost/mattermost-server/v5/model/config.go
index 4ca62e79..f50bbf29 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/config.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/config.go
@@ -47,6 +47,7 @@ const (
GENERIC_NO_CHANNEL_NOTIFICATION = "generic_no_channel"
GENERIC_NOTIFICATION = "generic"
GENERIC_NOTIFICATION_SERVER = "https://push-test.mattermost.com"
+ MM_SUPPORT_ADDRESS = "support@mattermost.com"
FULL_NOTIFICATION = "full"
ID_LOADED_NOTIFICATION = "id_loaded"
@@ -160,8 +161,10 @@ const (
ANALYTICS_SETTINGS_DEFAULT_MAX_USERS_FOR_STATISTICS = 2500
- ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_COLOR = "#f2a93b"
- ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_TEXT_COLOR = "#333333"
+ ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_COLOR = "#f2a93b"
+ ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_TEXT_COLOR = "#333333"
+ ANNOUNCEMENT_SETTINGS_DEFAULT_NOTICES_JSON_URL = "https://notices.mattermost.com/"
+ ANNOUNCEMENT_SETTINGS_DEFAULT_NOTICES_FETCH_FREQUENCY_SECONDS = 3600
TEAM_SETTINGS_DEFAULT_TEAM_TEXT = "default"
@@ -246,94 +249,97 @@ var ServerTLSSupportedCiphers = map[string]uint16{
}
type ServiceSettings struct {
- SiteURL *string `restricted:"true"`
- WebsocketURL *string `restricted:"true"`
- LicenseFileLocation *string `restricted:"true"`
- ListenAddress *string `restricted:"true"`
- ConnectionSecurity *string `restricted:"true"`
- TLSCertFile *string `restricted:"true"`
- TLSKeyFile *string `restricted:"true"`
- TLSMinVer *string `restricted:"true"`
- TLSStrictTransport *bool `restricted:"true"`
- TLSStrictTransportMaxAge *int64 `restricted:"true"`
- TLSOverwriteCiphers []string `restricted:"true"`
- UseLetsEncrypt *bool `restricted:"true"`
- LetsEncryptCertificateCacheFile *string `restricted:"true"`
- Forward80To443 *bool `restricted:"true"`
- TrustedProxyIPHeader []string `restricted:"true"`
- ReadTimeout *int `restricted:"true"`
- WriteTimeout *int `restricted:"true"`
- IdleTimeout *int `restricted:"true"`
- MaximumLoginAttempts *int `restricted:"true"`
- GoroutineHealthThreshold *int `restricted:"true"`
- GoogleDeveloperKey *string `restricted:"true"`
- EnableOAuthServiceProvider *bool
- EnableIncomingWebhooks *bool
- EnableOutgoingWebhooks *bool
- EnableCommands *bool
- DEPRECATED_DO_NOT_USE_EnableOnlyAdminIntegrations *bool `json:"EnableOnlyAdminIntegrations" mapstructure:"EnableOnlyAdminIntegrations"` // This field is deprecated and must not be used.
- EnablePostUsernameOverride *bool
- EnablePostIconOverride *bool
- EnableLinkPreviews *bool
- EnableTesting *bool `restricted:"true"`
- EnableDeveloper *bool `restricted:"true"`
- EnableOpenTracing *bool `restricted:"true"`
- EnableSecurityFixAlert *bool `restricted:"true"`
- EnableInsecureOutgoingConnections *bool `restricted:"true"`
- AllowedUntrustedInternalConnections *string `restricted:"true"`
- EnableMultifactorAuthentication *bool
- EnforceMultifactorAuthentication *bool
- EnableUserAccessTokens *bool
- AllowCorsFrom *string `restricted:"true"`
- CorsExposedHeaders *string `restricted:"true"`
- CorsAllowCredentials *bool `restricted:"true"`
- CorsDebug *bool `restricted:"true"`
- AllowCookiesForSubdomains *bool `restricted:"true"`
- ExtendSessionLengthWithActivity *bool `restricted:"true"`
- SessionLengthWebInDays *int `restricted:"true"`
- SessionLengthMobileInDays *int `restricted:"true"`
- SessionLengthSSOInDays *int `restricted:"true"`
- SessionCacheInMinutes *int `restricted:"true"`
- SessionIdleTimeoutInMinutes *int `restricted:"true"`
- WebsocketSecurePort *int `restricted:"true"`
- WebsocketPort *int `restricted:"true"`
- WebserverMode *string `restricted:"true"`
- EnableCustomEmoji *bool
- EnableEmojiPicker *bool
- EnableGifPicker *bool
- GfycatApiKey *string
- GfycatApiSecret *string
- DEPRECATED_DO_NOT_USE_RestrictCustomEmojiCreation *string `json:"RestrictCustomEmojiCreation" mapstructure:"RestrictCustomEmojiCreation"` // This field is deprecated and must not be used.
- DEPRECATED_DO_NOT_USE_RestrictPostDelete *string `json:"RestrictPostDelete" mapstructure:"RestrictPostDelete"` // This field is deprecated and must not be used.
- DEPRECATED_DO_NOT_USE_AllowEditPost *string `json:"AllowEditPost" mapstructure:"AllowEditPost"` // This field is deprecated and must not be used.
- PostEditTimeLimit *int
- TimeBetweenUserTypingUpdatesMilliseconds *int64 `restricted:"true"`
- EnablePostSearch *bool `restricted:"true"`
- MinimumHashtagLength *int `restricted:"true"`
- EnableUserTypingMessages *bool `restricted:"true"`
- EnableChannelViewedMessages *bool `restricted:"true"`
- EnableUserStatuses *bool `restricted:"true"`
- ExperimentalEnableAuthenticationTransfer *bool `restricted:"true"`
- ClusterLogTimeoutMilliseconds *int `restricted:"true"`
- CloseUnusedDirectMessages *bool
- EnablePreviewFeatures *bool
- EnableTutorial *bool
- ExperimentalEnableDefaultChannelLeaveJoinMessages *bool
- ExperimentalGroupUnreadChannels *string
- ExperimentalChannelOrganization *bool
- ExperimentalChannelSidebarOrganization *string
- DEPRECATED_DO_NOT_USE_ImageProxyType *string `json:"ImageProxyType" mapstructure:"ImageProxyType"` // This field is deprecated and must not be used.
- DEPRECATED_DO_NOT_USE_ImageProxyURL *string `json:"ImageProxyURL" mapstructure:"ImageProxyURL"` // This field is deprecated and must not be used.
- DEPRECATED_DO_NOT_USE_ImageProxyOptions *string `json:"ImageProxyOptions" mapstructure:"ImageProxyOptions"` // This field is deprecated and must not be used.
+ SiteURL *string `access:"environment,authentication,write_restrictable"`
+ WebsocketURL *string `access:"write_restrictable"`
+ LicenseFileLocation *string `access:"write_restrictable"`
+ ListenAddress *string `access:"environment,write_restrictable"`
+ ConnectionSecurity *string `access:"environment,write_restrictable"`
+ TLSCertFile *string `access:"environment,write_restrictable"`
+ TLSKeyFile *string `access:"environment,write_restrictable"`
+ TLSMinVer *string `access:"write_restrictable"`
+ TLSStrictTransport *bool `access:"write_restrictable"`
+ TLSStrictTransportMaxAge *int64 `access:"write_restrictable"`
+ TLSOverwriteCiphers []string `access:"write_restrictable"`
+ UseLetsEncrypt *bool `access:"environment,write_restrictable"`
+ LetsEncryptCertificateCacheFile *string `access:"environment,write_restrictable"`
+ Forward80To443 *bool `access:"environment,write_restrictable"`
+ TrustedProxyIPHeader []string `access:"write_restrictable"`
+ ReadTimeout *int `access:"environment,write_restrictable"`
+ WriteTimeout *int `access:"environment,write_restrictable"`
+ IdleTimeout *int `access:"write_restrictable"`
+ MaximumLoginAttempts *int `access:"authentication,write_restrictable"`
+ GoroutineHealthThreshold *int `access:"write_restrictable"`
+ GoogleDeveloperKey *string `access:"site,write_restrictable"`
+ EnableOAuthServiceProvider *bool `access:"integrations"`
+ EnableIncomingWebhooks *bool `access:"integrations"`
+ EnableOutgoingWebhooks *bool `access:"integrations"`
+ EnableCommands *bool `access:"integrations"`
+ DEPRECATED_DO_NOT_USE_EnableOnlyAdminIntegrations *bool `json:"EnableOnlyAdminIntegrations" mapstructure:"EnableOnlyAdminIntegrations"` // This field is deprecated and must not be used.
+ EnablePostUsernameOverride *bool `access:"integrations"`
+ EnablePostIconOverride *bool `access:"integrations"`
+ EnableLinkPreviews *bool `access:"site"`
+ EnableTesting *bool `access:"environment,write_restrictable"`
+ EnableDeveloper *bool `access:"environment,write_restrictable"`
+ EnableOpenTracing *bool `access:"write_restrictable"`
+ EnableSecurityFixAlert *bool `access:"environment,write_restrictable"`
+ EnableInsecureOutgoingConnections *bool `access:"environment,write_restrictable"`
+ AllowedUntrustedInternalConnections *string `access:"environment,write_restrictable"`
+ EnableMultifactorAuthentication *bool `access:"authentication"`
+ EnforceMultifactorAuthentication *bool `access:"authentication"`
+ EnableUserAccessTokens *bool `access:"integrations"`
+ AllowCorsFrom *string `access:"integrations,write_restrictable"`
+ CorsExposedHeaders *string `access:"integrations,write_restrictable"`
+ CorsAllowCredentials *bool `access:"integrations,write_restrictable"`
+ CorsDebug *bool `access:"integrations,write_restrictable"`
+ AllowCookiesForSubdomains *bool `access:"write_restrictable"`
+ ExtendSessionLengthWithActivity *bool `access:"environment,write_restrictable"`
+ SessionLengthWebInDays *int `access:"environment,write_restrictable"`
+ SessionLengthMobileInDays *int `access:"environment,write_restrictable"`
+ SessionLengthSSOInDays *int `access:"environment,write_restrictable"`
+ SessionCacheInMinutes *int `access:"environment,write_restrictable"`
+ SessionIdleTimeoutInMinutes *int `access:"environment,write_restrictable"`
+ WebsocketSecurePort *int `access:"write_restrictable"`
+ WebsocketPort *int `access:"write_restrictable"`
+ WebserverMode *string `access:"environment,write_restrictable"`
+ EnableCustomEmoji *bool `access:"site"`
+ EnableEmojiPicker *bool `access:"site"`
+ EnableGifPicker *bool `access:"integrations"`
+ GfycatApiKey *string `access:"integrations"`
+ GfycatApiSecret *string `access:"integrations"`
+ DEPRECATED_DO_NOT_USE_RestrictCustomEmojiCreation *string `json:"RestrictCustomEmojiCreation" mapstructure:"RestrictCustomEmojiCreation"` // This field is deprecated and must not be used.
+ DEPRECATED_DO_NOT_USE_RestrictPostDelete *string `json:"RestrictPostDelete" mapstructure:"RestrictPostDelete"` // This field is deprecated and must not be used.
+ DEPRECATED_DO_NOT_USE_AllowEditPost *string `json:"AllowEditPost" mapstructure:"AllowEditPost"` // This field is deprecated and must not be used.
+ PostEditTimeLimit *int `access:"user_management_permissions"`
+ TimeBetweenUserTypingUpdatesMilliseconds *int64 `access:"experimental,write_restrictable"`
+ EnablePostSearch *bool `access:"write_restrictable"`
+ MinimumHashtagLength *int `access:"environment,write_restrictable"`
+ EnableUserTypingMessages *bool `access:"experimental,write_restrictable"`
+ EnableChannelViewedMessages *bool `access:"experimental,write_restrictable"`
+ EnableUserStatuses *bool `access:"write_restrictable"`
+ ExperimentalEnableAuthenticationTransfer *bool `access:"experimental,write_restrictable"`
+ ClusterLogTimeoutMilliseconds *int `access:"write_restrictable"`
+ CloseUnusedDirectMessages *bool `access:"experimental"`
+ EnablePreviewFeatures *bool `access:"experimental"`
+ EnableTutorial *bool `access:"experimental"`
+ ExperimentalEnableDefaultChannelLeaveJoinMessages *bool `access:"experimental"`
+ ExperimentalGroupUnreadChannels *string `access:"experimental"`
+ ExperimentalChannelOrganization *bool `access:"experimental"`
+ ExperimentalChannelSidebarOrganization *string `access:"experimental"`
+ ExperimentalDataPrefetch *bool `access:"experimental"`
+ DEPRECATED_DO_NOT_USE_ImageProxyType *string `json:"ImageProxyType" mapstructure:"ImageProxyType"` // This field is deprecated and must not be used.
+ DEPRECATED_DO_NOT_USE_ImageProxyURL *string `json:"ImageProxyURL" mapstructure:"ImageProxyURL"` // This field is deprecated and must not be used.
+ DEPRECATED_DO_NOT_USE_ImageProxyOptions *string `json:"ImageProxyOptions" mapstructure:"ImageProxyOptions"` // This field is deprecated and must not be used.
EnableAPITeamDeletion *bool
- ExperimentalEnableHardenedMode *bool
- DisableLegacyMFA *bool `restricted:"true"`
- ExperimentalStrictCSRFEnforcement *bool `restricted:"true"`
- EnableEmailInvitations *bool
- DisableBotsWhenOwnerIsDeactivated *bool `restricted:"true"`
- EnableBotAccountCreation *bool
- EnableSVGs *bool
- EnableLatex *bool
+ EnableAPIUserDeletion *bool
+ ExperimentalEnableHardenedMode *bool `access:"experimental"`
+ DisableLegacyMFA *bool `access:"write_restrictable"`
+ ExperimentalStrictCSRFEnforcement *bool `access:"experimental,write_restrictable"`
+ EnableEmailInvitations *bool `access:"authentication"`
+ DisableBotsWhenOwnerIsDeactivated *bool `access:"integrations,write_restrictable"`
+ EnableBotAccountCreation *bool `access:"integrations"`
+ EnableSVGs *bool `access:"site"`
+ EnableLatex *bool `access:"site"`
+ EnableAPIChannelDeletion *bool
EnableLocalMode *bool
LocalModeSocketLocation *string
}
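The restricted:"true" tags above give way to finer-grained access:"..." tags. How the server consumes them is outside this diff; a minimal reflection sketch showing how such tags can be read:

package example

import (
	"reflect"
	"strings"
)

// fieldsWithAccess returns the names of the struct fields whose access tag
// lists the given scope, e.g. "environment" or "write_restrictable".
// Usage (hypothetical): fieldsWithAccess(&model.ServiceSettings{}, "environment").
func fieldsWithAccess(section interface{}, scope string) []string {
	t := reflect.TypeOf(section)
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
	}
	var names []string
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		for _, part := range strings.Split(field.Tag.Get("access"), ",") {
			if part == scope {
				names = append(names, field.Name)
				break
			}
		}
	}
	return names
}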
@@ -614,7 +620,7 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) {
}
if s.EnableCustomEmoji == nil {
- s.EnableCustomEmoji = NewBool(false)
+ s.EnableCustomEmoji = NewBool(true)
}
if s.EnableEmojiPicker == nil {
@@ -622,7 +628,7 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) {
}
if s.EnableGifPicker == nil {
- s.EnableGifPicker = NewBool(false)
+ s.EnableGifPicker = NewBool(true)
}
if s.GfycatApiKey == nil || *s.GfycatApiKey == "" {
@@ -678,6 +684,10 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) {
s.ExperimentalChannelSidebarOrganization = NewString("disabled")
}
+ if s.ExperimentalDataPrefetch == nil {
+ s.ExperimentalDataPrefetch = NewBool(true)
+ }
+
if s.DEPRECATED_DO_NOT_USE_ImageProxyType == nil {
s.DEPRECATED_DO_NOT_USE_ImageProxyType = NewString("")
}
@@ -694,6 +704,14 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) {
s.EnableAPITeamDeletion = NewBool(false)
}
+ if s.EnableAPIUserDeletion == nil {
+ s.EnableAPIUserDeletion = NewBool(false)
+ }
+
+ if s.EnableAPIChannelDeletion == nil {
+ s.EnableAPIChannelDeletion = NewBool(false)
+ }
+
if s.ExperimentalEnableHardenedMode == nil {
s.ExperimentalEnableHardenedMode = NewBool(false)
}
@@ -740,20 +758,21 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) {
}
type ClusterSettings struct {
- Enable *bool `restricted:"true"`
- ClusterName *string `restricted:"true"`
- OverrideHostname *string `restricted:"true"`
- NetworkInterface *string `restricted:"true"`
- BindAddress *string `restricted:"true"`
- AdvertiseAddress *string `restricted:"true"`
- UseIpAddress *bool `restricted:"true"`
- UseExperimentalGossip *bool `restricted:"true"`
- ReadOnlyConfig *bool `restricted:"true"`
- GossipPort *int `restricted:"true"`
- StreamingPort *int `restricted:"true"`
- MaxIdleConns *int `restricted:"true"`
- MaxIdleConnsPerHost *int `restricted:"true"`
- IdleConnTimeoutMilliseconds *int `restricted:"true"`
+ Enable *bool `access:"environment,write_restrictable"`
+ ClusterName *string `access:"environment,write_restrictable"`
+ OverrideHostname *string `access:"environment,write_restrictable"`
+ NetworkInterface *string `access:"environment,write_restrictable"`
+ BindAddress *string `access:"environment,write_restrictable"`
+ AdvertiseAddress *string `access:"environment,write_restrictable"`
+ UseIpAddress *bool `access:"environment,write_restrictable"`
+ UseExperimentalGossip *bool `access:"environment,write_restrictable"`
+ EnableExperimentalGossipEncryption *bool `access:"environment,write_restrictable"`
+ ReadOnlyConfig *bool `access:"environment,write_restrictable"`
+ GossipPort *int `access:"environment,write_restrictable"`
+ StreamingPort *int `access:"environment,write_restrictable"`
+ MaxIdleConns *int `access:"environment,write_restrictable"`
+ MaxIdleConnsPerHost *int `access:"environment,write_restrictable"`
+ IdleConnTimeoutMilliseconds *int `access:"environment,write_restrictable"`
}
func (s *ClusterSettings) SetDefaults() {
@@ -789,6 +808,10 @@ func (s *ClusterSettings) SetDefaults() {
s.UseExperimentalGossip = NewBool(false)
}
+ if s.EnableExperimentalGossipEncryption == nil {
+ s.EnableExperimentalGossipEncryption = NewBool(false)
+ }
+
if s.ReadOnlyConfig == nil {
s.ReadOnlyConfig = NewBool(true)
}
@@ -815,9 +838,9 @@ func (s *ClusterSettings) SetDefaults() {
}
type MetricsSettings struct {
- Enable *bool `restricted:"true"`
- BlockProfileRate *int `restricted:"true"`
- ListenAddress *string `restricted:"true"`
+ Enable *bool `access:"environment,write_restrictable"`
+ BlockProfileRate *int `access:"environment,write_restrictable"`
+ ListenAddress *string `access:"environment,write_restrictable"`
}
func (s *MetricsSettings) SetDefaults() {
@@ -835,12 +858,14 @@ func (s *MetricsSettings) SetDefaults() {
}
type ExperimentalSettings struct {
- ClientSideCertEnable *bool
- ClientSideCertCheck *string
- EnableClickToReply *bool `restricted:"true"`
- LinkMetadataTimeoutMilliseconds *int64 `restricted:"true"`
- RestrictSystemAdmin *bool `restricted:"true"`
- UseNewSAMLLibrary *bool
+ ClientSideCertEnable *bool `access:"experimental"`
+ ClientSideCertCheck *string `access:"experimental"`
+ EnableClickToReply *bool `access:"experimental,write_restrictable"`
+ LinkMetadataTimeoutMilliseconds *int64 `access:"experimental,write_restrictable"`
+ RestrictSystemAdmin *bool `access:"experimental,write_restrictable"`
+ UseNewSAMLLibrary *bool `access:"experimental"`
+ CloudUserLimit *int64 `access:"experimental,write_restrictable"`
+ CloudBilling *bool `access:"experimental,write_restrictable"`
}
func (s *ExperimentalSettings) SetDefaults() {
@@ -863,13 +888,23 @@ func (s *ExperimentalSettings) SetDefaults() {
if s.RestrictSystemAdmin == nil {
s.RestrictSystemAdmin = NewBool(false)
}
+
+ if s.CloudUserLimit == nil {
+ // User limit 0 is treated as no limit
+ s.CloudUserLimit = NewInt64(0)
+ }
+
+ if s.CloudBilling == nil {
+ s.CloudBilling = NewBool(false)
+ }
+
if s.UseNewSAMLLibrary == nil {
s.UseNewSAMLLibrary = NewBool(false)
}
}
type AnalyticsSettings struct {
- MaxUsersForStatistics *int `restricted:"true"`
+ MaxUsersForStatistics *int `access:"write_restrictable"`
}
func (s *AnalyticsSettings) SetDefaults() {
@@ -879,13 +914,13 @@ func (s *AnalyticsSettings) SetDefaults() {
}
type SSOSettings struct {
- Enable *bool
- Secret *string
- Id *string
- Scope *string
- AuthEndpoint *string
- TokenEndpoint *string
- UserApiEndpoint *string
+ Enable *bool `access:"authentication"`
+ Secret *string `access:"authentication"`
+ Id *string `access:"authentication"`
+ Scope *string `access:"authentication"`
+ AuthEndpoint *string `access:"authentication"`
+ TokenEndpoint *string `access:"authentication"`
+ UserApiEndpoint *string `access:"authentication"`
}
func (s *SSOSettings) setDefaults(scope, authEndpoint, tokenEndpoint, userApiEndpoint string) {
@@ -919,14 +954,14 @@ func (s *SSOSettings) setDefaults(scope, authEndpoint, tokenEndpoint, userApiEnd
}
type Office365Settings struct {
- Enable *bool
- Secret *string
- Id *string
- Scope *string
- AuthEndpoint *string
- TokenEndpoint *string
- UserApiEndpoint *string
- DirectoryId *string
+ Enable *bool `access:"authentication"`
+ Secret *string `access:"authentication"`
+ Id *string `access:"authentication"`
+ Scope *string `access:"authentication"`
+ AuthEndpoint *string `access:"authentication"`
+ TokenEndpoint *string `access:"authentication"`
+ UserApiEndpoint *string `access:"authentication"`
+ DirectoryId *string `access:"authentication"`
}
func (s *Office365Settings) setDefaults() {
@@ -976,17 +1011,17 @@ func (s *Office365Settings) SSOSettings() *SSOSettings {
}
type SqlSettings struct {
- DriverName *string `restricted:"true"`
- DataSource *string `restricted:"true"`
- DataSourceReplicas []string `restricted:"true"`
- DataSourceSearchReplicas []string `restricted:"true"`
- MaxIdleConns *int `restricted:"true"`
- ConnMaxLifetimeMilliseconds *int `restricted:"true"`
- MaxOpenConns *int `restricted:"true"`
- Trace *bool `restricted:"true"`
- AtRestEncryptKey *string `restricted:"true"`
- QueryTimeout *int `restricted:"true"`
- DisableDatabaseSearch *bool `restricted:"true"`
+ DriverName *string `access:"environment,write_restrictable"`
+ DataSource *string `access:"environment,write_restrictable"`
+ DataSourceReplicas []string `access:"environment,write_restrictable"`
+ DataSourceSearchReplicas []string `access:"environment,write_restrictable"`
+ MaxIdleConns *int `access:"environment,write_restrictable"`
+ ConnMaxLifetimeMilliseconds *int `access:"environment,write_restrictable"`
+ MaxOpenConns *int `access:"environment,write_restrictable"`
+ Trace *bool `access:"environment,write_restrictable"`
+ AtRestEncryptKey *string `access:"environment,write_restrictable"`
+ QueryTimeout *int `access:"environment,write_restrictable"`
+ DisableDatabaseSearch *bool `access:"environment,write_restrictable"`
}
func (s *SqlSettings) SetDefaults(isUpdate bool) {
@@ -1042,15 +1077,17 @@ func (s *SqlSettings) SetDefaults(isUpdate bool) {
}
type LogSettings struct {
- EnableConsole *bool `restricted:"true"`
- ConsoleLevel *string `restricted:"true"`
- ConsoleJson *bool `restricted:"true"`
- EnableFile *bool `restricted:"true"`
- FileLevel *string `restricted:"true"`
- FileJson *bool `restricted:"true"`
- FileLocation *string `restricted:"true"`
- EnableWebhookDebugging *bool `restricted:"true"`
- EnableDiagnostics *bool `restricted:"true"`
+ EnableConsole *bool `access:"environment,write_restrictable"`
+ ConsoleLevel *string `access:"environment,write_restrictable"`
+ ConsoleJson *bool `access:"environment,write_restrictable"`
+ EnableFile *bool `access:"environment,write_restrictable"`
+ FileLevel *string `access:"environment,write_restrictable"`
+ FileJson *bool `access:"environment,write_restrictable"`
+ FileLocation *string `access:"environment,write_restrictable"`
+ EnableWebhookDebugging *bool `access:"environment,write_restrictable"`
+ EnableDiagnostics *bool `access:"environment,write_restrictable"`
+ EnableSentry *bool `access:"environment,write_restrictable"`
+ AdvancedLoggingConfig *string `access:"environment,write_restrictable"`
}
func (s *LogSettings) SetDefaults() {
@@ -1082,6 +1119,10 @@ func (s *LogSettings) SetDefaults() {
s.EnableDiagnostics = NewBool(true)
}
+ if s.EnableSentry == nil {
+ s.EnableSentry = NewBool(*s.EnableDiagnostics)
+ }
+
if s.ConsoleJson == nil {
s.ConsoleJson = NewBool(true)
}
@@ -1089,55 +1130,24 @@ func (s *LogSettings) SetDefaults() {
if s.FileJson == nil {
s.FileJson = NewBool(true)
}
+
+ if s.AdvancedLoggingConfig == nil {
+ s.AdvancedLoggingConfig = NewString("")
+ }
}
type ExperimentalAuditSettings struct {
- SysLogEnabled *bool `restricted:"true"`
- SysLogIP *string `restricted:"true"`
- SysLogPort *int `restricted:"true"`
- SysLogTag *string `restricted:"true"`
- SysLogCert *string `restricted:"true"`
- SysLogInsecure *bool `restricted:"true"`
- SysLogMaxQueueSize *int `restricted:"true"`
-
- FileEnabled *bool `restricted:"true"`
- FileName *string `restricted:"true"`
- FileMaxSizeMB *int `restricted:"true"`
- FileMaxAgeDays *int `restricted:"true"`
- FileMaxBackups *int `restricted:"true"`
- FileCompress *bool `restricted:"true"`
- FileMaxQueueSize *int `restricted:"true"`
+ FileEnabled *bool `access:"experimental,write_restrictable"`
+ FileName *string `access:"experimental,write_restrictable"`
+ FileMaxSizeMB *int `access:"experimental,write_restrictable"`
+ FileMaxAgeDays *int `access:"experimental,write_restrictable"`
+ FileMaxBackups *int `access:"experimental,write_restrictable"`
+ FileCompress *bool `access:"experimental,write_restrictable"`
+ FileMaxQueueSize *int `access:"experimental,write_restrictable"`
+ AdvancedLoggingConfig *string `access:"experimental,write_restrictable"`
}
func (s *ExperimentalAuditSettings) SetDefaults() {
- if s.SysLogEnabled == nil {
- s.SysLogEnabled = NewBool(false)
- }
-
- if s.SysLogIP == nil {
- s.SysLogIP = NewString("localhost")
- }
-
- if s.SysLogPort == nil {
- s.SysLogPort = NewInt(6514)
- }
-
- if s.SysLogTag == nil {
- s.SysLogTag = NewString("")
- }
-
- if s.SysLogCert == nil {
- s.SysLogCert = NewString("")
- }
-
- if s.SysLogInsecure == nil {
- s.SysLogInsecure = NewBool(false)
- }
-
- if s.SysLogMaxQueueSize == nil {
- s.SysLogMaxQueueSize = NewInt(1000)
- }
-
if s.FileEnabled == nil {
s.FileEnabled = NewBool(false)
}
@@ -1165,16 +1175,21 @@ func (s *ExperimentalAuditSettings) SetDefaults() {
if s.FileMaxQueueSize == nil {
s.FileMaxQueueSize = NewInt(1000)
}
+
+ if s.AdvancedLoggingConfig == nil {
+ s.AdvancedLoggingConfig = NewString("")
+ }
}
type NotificationLogSettings struct {
- EnableConsole *bool `restricted:"true"`
- ConsoleLevel *string `restricted:"true"`
- ConsoleJson *bool `restricted:"true"`
- EnableFile *bool `restricted:"true"`
- FileLevel *string `restricted:"true"`
- FileJson *bool `restricted:"true"`
- FileLocation *string `restricted:"true"`
+ EnableConsole *bool `access:"write_restrictable"`
+ ConsoleLevel *string `access:"write_restrictable"`
+ ConsoleJson *bool `access:"write_restrictable"`
+ EnableFile *bool `access:"write_restrictable"`
+ FileLevel *string `access:"write_restrictable"`
+ FileJson *bool `access:"write_restrictable"`
+ FileLocation *string `access:"write_restrictable"`
+ AdvancedLoggingConfig *string `access:"write_restrictable"`
}
func (s *NotificationLogSettings) SetDefaults() {
@@ -1205,14 +1220,18 @@ func (s *NotificationLogSettings) SetDefaults() {
if s.FileJson == nil {
s.FileJson = NewBool(true)
}
+
+ if s.AdvancedLoggingConfig == nil {
+ s.AdvancedLoggingConfig = NewString("")
+ }
}
type PasswordSettings struct {
- MinimumLength *int
- Lowercase *bool
- Number *bool
- Uppercase *bool
- Symbol *bool
+ MinimumLength *int `access:"authentication"`
+ Lowercase *bool `access:"authentication"`
+ Number *bool `access:"authentication"`
+ Uppercase *bool `access:"authentication"`
+ Symbol *bool `access:"authentication"`
}
func (s *PasswordSettings) SetDefaults() {
@@ -1238,24 +1257,25 @@ func (s *PasswordSettings) SetDefaults() {
}
type FileSettings struct {
- EnableFileAttachments *bool
- EnableMobileUpload *bool
- EnableMobileDownload *bool
- MaxFileSize *int64
- DriverName *string `restricted:"true"`
- Directory *string `restricted:"true"`
- EnablePublicLink *bool
- PublicLinkSalt *string
- InitialFont *string
- AmazonS3AccessKeyId *string `restricted:"true"`
- AmazonS3SecretAccessKey *string `restricted:"true"`
- AmazonS3Bucket *string `restricted:"true"`
- AmazonS3Region *string `restricted:"true"`
- AmazonS3Endpoint *string `restricted:"true"`
- AmazonS3SSL *bool `restricted:"true"`
- AmazonS3SignV2 *bool `restricted:"true"`
- AmazonS3SSE *bool `restricted:"true"`
- AmazonS3Trace *bool `restricted:"true"`
+ EnableFileAttachments *bool `access:"site"`
+ EnableMobileUpload *bool `access:"site"`
+ EnableMobileDownload *bool `access:"site"`
+ MaxFileSize *int64 `access:"environment"`
+ DriverName *string `access:"environment,write_restrictable"`
+ Directory *string `access:"environment,write_restrictable"`
+ EnablePublicLink *bool `access:"site"`
+ PublicLinkSalt *string `access:"site"`
+ InitialFont *string `access:"environment"`
+ AmazonS3AccessKeyId *string `access:"environment,write_restrictable"`
+ AmazonS3SecretAccessKey *string `access:"environment,write_restrictable"`
+ AmazonS3Bucket *string `access:"environment,write_restrictable"`
+ AmazonS3PathPrefix *string `access:"environment,write_restrictable"`
+ AmazonS3Region *string `access:"environment,write_restrictable"`
+ AmazonS3Endpoint *string `access:"environment,write_restrictable"`
+ AmazonS3SSL *bool `access:"environment,write_restrictable"`
+ AmazonS3SignV2 *bool `access:"environment,write_restrictable"`
+ AmazonS3SSE *bool `access:"environment,write_restrictable"`
+ AmazonS3Trace *bool `access:"environment,write_restrictable"`
}
func (s *FileSettings) SetDefaults(isUpdate bool) {
@@ -1279,7 +1299,7 @@ func (s *FileSettings) SetDefaults(isUpdate bool) {
s.DriverName = NewString(IMAGE_DRIVER_LOCAL)
}
- if s.Directory == nil {
+ if s.Directory == nil || *s.Directory == "" {
s.Directory = NewString(FILE_SETTINGS_DEFAULT_DIRECTORY)
}
@@ -1314,6 +1334,10 @@ func (s *FileSettings) SetDefaults(isUpdate bool) {
s.AmazonS3Bucket = NewString("")
}
+ if s.AmazonS3PathPrefix == nil {
+ s.AmazonS3PathPrefix = NewString("")
+ }
+
if s.AmazonS3Region == nil {
s.AmazonS3Region = NewString("")
}
@@ -1342,35 +1366,36 @@ func (s *FileSettings) SetDefaults(isUpdate bool) {
}
type EmailSettings struct {
- EnableSignUpWithEmail *bool
- EnableSignInWithEmail *bool
- EnableSignInWithUsername *bool
- SendEmailNotifications *bool
- UseChannelInEmailNotifications *bool
- RequireEmailVerification *bool
- FeedbackName *string
- FeedbackEmail *string
- ReplyToAddress *string
- FeedbackOrganization *string
- EnableSMTPAuth *bool `restricted:"true"`
- SMTPUsername *string `restricted:"true"`
- SMTPPassword *string `restricted:"true"`
- SMTPServer *string `restricted:"true"`
- SMTPPort *string `restricted:"true"`
+ EnableSignUpWithEmail *bool `access:"authentication"`
+ EnableSignInWithEmail *bool `access:"authentication"`
+ EnableSignInWithUsername *bool `access:"authentication"`
+ SendEmailNotifications *bool `access:"site"`
+ UseChannelInEmailNotifications *bool `access:"experimental"`
+ RequireEmailVerification *bool `access:"authentication"`
+ FeedbackName *string `access:"site"`
+ FeedbackEmail *string `access:"site"`
+ ReplyToAddress *string `access:"site"`
+ FeedbackOrganization *string `access:"site"`
+ EnableSMTPAuth *bool `access:"environment,write_restrictable"`
+ SMTPUsername *string `access:"environment,write_restrictable"`
+ SMTPPassword *string `access:"environment,write_restrictable"`
+ SMTPServer *string `access:"environment,write_restrictable"`
+ SMTPPort *string `access:"environment,write_restrictable"`
SMTPServerTimeout *int
- ConnectionSecurity *string `restricted:"true"`
- SendPushNotifications *bool
- PushNotificationServer *string
- PushNotificationContents *string
- EnableEmailBatching *bool
- EmailBatchingBufferSize *int
- EmailBatchingInterval *int
- EnablePreviewModeBanner *bool
- SkipServerCertificateVerification *bool `restricted:"true"`
- EmailNotificationContentsType *string
- LoginButtonColor *string
- LoginButtonBorderColor *string
- LoginButtonTextColor *string
+ ConnectionSecurity *string `access:"environment,write_restrictable"`
+ SendPushNotifications *bool `access:"environment"`
+ PushNotificationServer *string `access:"environment"`
+ PushNotificationContents *string `access:"site"`
+ PushNotificationBuffer *int
+ EnableEmailBatching *bool `access:"site"`
+ EmailBatchingBufferSize *int `access:"experimental"`
+ EmailBatchingInterval *int `access:"experimental"`
+ EnablePreviewModeBanner *bool `access:"site"`
+ SkipServerCertificateVerification *bool `access:"environment,write_restrictable"`
+ EmailNotificationContentsType *string `access:"site"`
+ LoginButtonColor *string `access:"experimental"`
+ LoginButtonBorderColor *string `access:"experimental"`
+ LoginButtonTextColor *string `access:"experimental"`
}
func (s *EmailSettings) SetDefaults(isUpdate bool) {
@@ -1462,6 +1487,10 @@ func (s *EmailSettings) SetDefaults(isUpdate bool) {
s.PushNotificationContents = NewString(FULL_NOTIFICATION)
}
+ if s.PushNotificationBuffer == nil {
+ s.PushNotificationBuffer = NewInt(1000)
+ }
+
if s.EnableEmailBatching == nil {
s.EnableEmailBatching = NewBool(false)
}
@@ -1512,13 +1541,13 @@ func (s *EmailSettings) SetDefaults(isUpdate bool) {
}
type RateLimitSettings struct {
- Enable *bool `restricted:"true"`
- PerSec *int `restricted:"true"`
- MaxBurst *int `restricted:"true"`
- MemoryStoreSize *int `restricted:"true"`
- VaryByRemoteAddr *bool `restricted:"true"`
- VaryByUser *bool `restricted:"true"`
- VaryByHeader string `restricted:"true"`
+ Enable *bool `access:"environment,write_restrictable"`
+ PerSec *int `access:"environment,write_restrictable"`
+ MaxBurst *int `access:"environment,write_restrictable"`
+ MemoryStoreSize *int `access:"environment,write_restrictable"`
+ VaryByRemoteAddr *bool `access:"environment,write_restrictable"`
+ VaryByUser *bool `access:"environment,write_restrictable"`
+ VaryByHeader string `access:"environment,write_restrictable"`
}
func (s *RateLimitSettings) SetDefaults() {
@@ -1548,8 +1577,8 @@ func (s *RateLimitSettings) SetDefaults() {
}
type PrivacySettings struct {
- ShowEmailAddress *bool
- ShowFullName *bool
+ ShowEmailAddress *bool `access:"site"`
+ ShowFullName *bool `access:"site"`
}
func (s *PrivacySettings) setDefaults() {
@@ -1563,14 +1592,15 @@ func (s *PrivacySettings) setDefaults() {
}
type SupportSettings struct {
- TermsOfServiceLink *string `restricted:"true"`
- PrivacyPolicyLink *string `restricted:"true"`
- AboutLink *string `restricted:"true"`
- HelpLink *string `restricted:"true"`
- ReportAProblemLink *string `restricted:"true"`
- SupportEmail *string
- CustomTermsOfServiceEnabled *bool
- CustomTermsOfServiceReAcceptancePeriod *int
+ TermsOfServiceLink *string `access:"site,write_restrictable"`
+ PrivacyPolicyLink *string `access:"site,write_restrictable"`
+ AboutLink *string `access:"site,write_restrictable"`
+ HelpLink *string `access:"site,write_restrictable"`
+ ReportAProblemLink *string `access:"site,write_restrictable"`
+ SupportEmail *string `access:"site"`
+ CustomTermsOfServiceEnabled *bool `access:"compliance"`
+ CustomTermsOfServiceReAcceptancePeriod *int `access:"compliance"`
+ EnableAskCommunityLink *bool `access:"site"`
}
func (s *SupportSettings) SetDefaults() {
@@ -1625,14 +1655,23 @@ func (s *SupportSettings) SetDefaults() {
if s.CustomTermsOfServiceReAcceptancePeriod == nil {
s.CustomTermsOfServiceReAcceptancePeriod = NewInt(SUPPORT_SETTINGS_DEFAULT_RE_ACCEPTANCE_PERIOD)
}
+
+ if s.EnableAskCommunityLink == nil {
+ s.EnableAskCommunityLink = NewBool(true)
+ }
}
type AnnouncementSettings struct {
- EnableBanner *bool
- BannerText *string
- BannerColor *string
- BannerTextColor *string
- AllowBannerDismissal *bool
+ EnableBanner *bool `access:"site"`
+ BannerText *string `access:"site"`
+ BannerColor *string `access:"site"`
+ BannerTextColor *string `access:"site"`
+ AllowBannerDismissal *bool `access:"site"`
+ AdminNoticesEnabled *bool `access:"site"`
+ UserNoticesEnabled *bool `access:"site"`
+ NoticesURL *string `access:"site,write_restrictable"`
+ NoticesFetchFrequency *int `access:"site,write_restrictable"`
+ NoticesSkipCache *bool `access:"site,write_restrictable"`
}
func (s *AnnouncementSettings) SetDefaults() {
@@ -1655,12 +1694,30 @@ func (s *AnnouncementSettings) SetDefaults() {
if s.AllowBannerDismissal == nil {
s.AllowBannerDismissal = NewBool(true)
}
+
+ if s.AdminNoticesEnabled == nil {
+ s.AdminNoticesEnabled = NewBool(true)
+ }
+
+ if s.UserNoticesEnabled == nil {
+ s.UserNoticesEnabled = NewBool(true)
+ }
+ if s.NoticesURL == nil {
+ s.NoticesURL = NewString(ANNOUNCEMENT_SETTINGS_DEFAULT_NOTICES_JSON_URL)
+ }
+ if s.NoticesSkipCache == nil {
+ s.NoticesSkipCache = NewBool(false)
+ }
+ if s.NoticesFetchFrequency == nil {
+ s.NoticesFetchFrequency = NewInt(ANNOUNCEMENT_SETTINGS_DEFAULT_NOTICES_FETCH_FREQUENCY_SECONDS)
+ }
+
}
type ThemeSettings struct {
- EnableThemeSelection *bool
- DefaultTheme *string
- AllowCustomThemes *bool
+ EnableThemeSelection *bool `access:"experimental"`
+ DefaultTheme *string `access:"experimental"`
+ AllowCustomThemes *bool `access:"experimental"`
AllowedThemes []string
}
@@ -1683,38 +1740,38 @@ func (s *ThemeSettings) SetDefaults() {
}
type TeamSettings struct {
- SiteName *string
- MaxUsersPerTeam *int
- DEPRECATED_DO_NOT_USE_EnableTeamCreation *bool `json:"EnableTeamCreation" mapstructure:"EnableTeamCreation"` // This field is deprecated and must not be used.
- EnableUserCreation *bool
- EnableOpenServer *bool
- EnableUserDeactivation *bool
- RestrictCreationToDomains *string
- EnableCustomBrand *bool
- CustomBrandText *string
- CustomDescriptionText *string
- RestrictDirectMessage *string
- DEPRECATED_DO_NOT_USE_RestrictTeamInvite *string `json:"RestrictTeamInvite" mapstructure:"RestrictTeamInvite"` // This field is deprecated and must not be used.
- DEPRECATED_DO_NOT_USE_RestrictPublicChannelManagement *string `json:"RestrictPublicChannelManagement" mapstructure:"RestrictPublicChannelManagement"` // This field is deprecated and must not be used.
- DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManagement *string `json:"RestrictPrivateChannelManagement" mapstructure:"RestrictPrivateChannelManagement"` // This field is deprecated and must not be used.
- DEPRECATED_DO_NOT_USE_RestrictPublicChannelCreation *string `json:"RestrictPublicChannelCreation" mapstructure:"RestrictPublicChannelCreation"` // This field is deprecated and must not be used.
- DEPRECATED_DO_NOT_USE_RestrictPrivateChannelCreation *string `json:"RestrictPrivateChannelCreation" mapstructure:"RestrictPrivateChannelCreation"` // This field is deprecated and must not be used.
- DEPRECATED_DO_NOT_USE_RestrictPublicChannelDeletion *string `json:"RestrictPublicChannelDeletion" mapstructure:"RestrictPublicChannelDeletion"` // This field is deprecated and must not be used.
- DEPRECATED_DO_NOT_USE_RestrictPrivateChannelDeletion *string `json:"RestrictPrivateChannelDeletion" mapstructure:"RestrictPrivateChannelDeletion"` // This field is deprecated and must not be used.
- DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManageMembers *string `json:"RestrictPrivateChannelManageMembers" mapstructure:"RestrictPrivateChannelManageMembers"` // This field is deprecated and must not be used.
- EnableXToLeaveChannelsFromLHS *bool
- UserStatusAwayTimeout *int64
- MaxChannelsPerTeam *int64
- MaxNotificationsPerChannel *int64
- EnableConfirmNotificationsToChannel *bool
- TeammateNameDisplay *string
- ExperimentalViewArchivedChannels *bool
- ExperimentalEnableAutomaticReplies *bool
- ExperimentalHideTownSquareinLHS *bool
- ExperimentalTownSquareIsReadOnly *bool
- LockTeammateNameDisplay *bool
- ExperimentalPrimaryTeam *string
- ExperimentalDefaultChannels []string
+ SiteName *string `access:"site"`
+ MaxUsersPerTeam *int `access:"site"`
+ DEPRECATED_DO_NOT_USE_EnableTeamCreation *bool `json:"EnableTeamCreation" mapstructure:"EnableTeamCreation"` // This field is deprecated and must not be used.
+ EnableUserCreation *bool `access:"authentication"`
+ EnableOpenServer *bool `access:"authentication"`
+ EnableUserDeactivation *bool `access:"experimental"`
+ RestrictCreationToDomains *string `access:"authentication"`
+ EnableCustomBrand *bool `access:"site"`
+ CustomBrandText *string `access:"site"`
+ CustomDescriptionText *string `access:"site"`
+ RestrictDirectMessage *string `access:"site"`
+ DEPRECATED_DO_NOT_USE_RestrictTeamInvite *string `json:"RestrictTeamInvite" mapstructure:"RestrictTeamInvite"` // This field is deprecated and must not be used.
+ DEPRECATED_DO_NOT_USE_RestrictPublicChannelManagement *string `json:"RestrictPublicChannelManagement" mapstructure:"RestrictPublicChannelManagement"` // This field is deprecated and must not be used.
+ DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManagement *string `json:"RestrictPrivateChannelManagement" mapstructure:"RestrictPrivateChannelManagement"` // This field is deprecated and must not be used.
+ DEPRECATED_DO_NOT_USE_RestrictPublicChannelCreation *string `json:"RestrictPublicChannelCreation" mapstructure:"RestrictPublicChannelCreation"` // This field is deprecated and must not be used.
+ DEPRECATED_DO_NOT_USE_RestrictPrivateChannelCreation *string `json:"RestrictPrivateChannelCreation" mapstructure:"RestrictPrivateChannelCreation"` // This field is deprecated and must not be used.
+ DEPRECATED_DO_NOT_USE_RestrictPublicChannelDeletion *string `json:"RestrictPublicChannelDeletion" mapstructure:"RestrictPublicChannelDeletion"` // This field is deprecated and must not be used.
+ DEPRECATED_DO_NOT_USE_RestrictPrivateChannelDeletion *string `json:"RestrictPrivateChannelDeletion" mapstructure:"RestrictPrivateChannelDeletion"` // This field is deprecated and must not be used.
+ DEPRECATED_DO_NOT_USE_RestrictPrivateChannelManageMembers *string `json:"RestrictPrivateChannelManageMembers" mapstructure:"RestrictPrivateChannelManageMembers"` // This field is deprecated and must not be used.
+ EnableXToLeaveChannelsFromLHS *bool `access:"experimental"`
+ UserStatusAwayTimeout *int64 `access:"experimental"`
+ MaxChannelsPerTeam *int64 `access:"site"`
+ MaxNotificationsPerChannel *int64 `access:"environment"`
+ EnableConfirmNotificationsToChannel *bool `access:"site"`
+ TeammateNameDisplay *string `access:"site"`
+ ExperimentalViewArchivedChannels *bool `access:"experimental,site"`
+ ExperimentalEnableAutomaticReplies *bool `access:"experimental"`
+ ExperimentalHideTownSquareinLHS *bool `access:"experimental"`
+ ExperimentalTownSquareIsReadOnly *bool `access:"experimental"`
+ LockTeammateNameDisplay *bool `access:"site"`
+ ExperimentalPrimaryTeam *string `access:"experimental"`
+ ExperimentalDefaultChannels []string `access:"experimental"`
}
func (s *TeamSettings) SetDefaults() {
@@ -1857,7 +1914,7 @@ func (s *TeamSettings) SetDefaults() {
}
if s.ExperimentalViewArchivedChannels == nil {
- s.ExperimentalViewArchivedChannels = NewBool(false)
+ s.ExperimentalViewArchivedChannels = NewBool(true)
}
if s.LockTeammateNameDisplay == nil {
@@ -1866,63 +1923,65 @@ func (s *TeamSettings) SetDefaults() {
}
type ClientRequirements struct {
- AndroidLatestVersion string `restricted:"true"`
- AndroidMinVersion string `restricted:"true"`
- DesktopLatestVersion string `restricted:"true"`
- DesktopMinVersion string `restricted:"true"`
- IosLatestVersion string `restricted:"true"`
- IosMinVersion string `restricted:"true"`
+ AndroidLatestVersion string `access:"write_restrictable"`
+ AndroidMinVersion string `access:"write_restrictable"`
+ DesktopLatestVersion string `access:"write_restrictable"`
+ DesktopMinVersion string `access:"write_restrictable"`
+ IosLatestVersion string `access:"write_restrictable"`
+ IosMinVersion string `access:"write_restrictable"`
}
type LdapSettings struct {
// Basic
- Enable *bool
- EnableSync *bool
- LdapServer *string
- LdapPort *int
- ConnectionSecurity *string
- BaseDN *string
- BindUsername *string
- BindPassword *string
+ Enable *bool `access:"authentication"`
+ EnableSync *bool `access:"authentication"`
+ LdapServer *string `access:"authentication"`
+ LdapPort *int `access:"authentication"`
+ ConnectionSecurity *string `access:"authentication"`
+ BaseDN *string `access:"authentication"`
+ BindUsername *string `access:"authentication"`
+ BindPassword *string `access:"authentication"`
// Filtering
- UserFilter *string
- GroupFilter *string
- GuestFilter *string
+ UserFilter *string `access:"authentication"`
+ GroupFilter *string `access:"authentication"`
+ GuestFilter *string `access:"authentication"`
EnableAdminFilter *bool
AdminFilter *string
// Group Mapping
- GroupDisplayNameAttribute *string
- GroupIdAttribute *string
+ GroupDisplayNameAttribute *string `access:"authentication"`
+ GroupIdAttribute *string `access:"authentication"`
// User Mapping
- FirstNameAttribute *string
- LastNameAttribute *string
- EmailAttribute *string
- UsernameAttribute *string
- NicknameAttribute *string
- IdAttribute *string
- PositionAttribute *string
- LoginIdAttribute *string
- PictureAttribute *string
+ FirstNameAttribute *string `access:"authentication"`
+ LastNameAttribute *string `access:"authentication"`
+ EmailAttribute *string `access:"authentication"`
+ UsernameAttribute *string `access:"authentication"`
+ NicknameAttribute *string `access:"authentication"`
+ IdAttribute *string `access:"authentication"`
+ PositionAttribute *string `access:"authentication"`
+ LoginIdAttribute *string `access:"authentication"`
+ PictureAttribute *string `access:"authentication"`
// Synchronization
- SyncIntervalMinutes *int
+ SyncIntervalMinutes *int `access:"authentication"`
// Advanced
- SkipCertificateVerification *bool
- QueryTimeout *int
- MaxPageSize *int
+ SkipCertificateVerification *bool `access:"authentication"`
+ PublicCertificateFile *string `access:"authentication"`
+ PrivateKeyFile *string `access:"authentication"`
+ QueryTimeout *int `access:"authentication"`
+ MaxPageSize *int `access:"authentication"`
// Customization
- LoginFieldName *string
+ LoginFieldName *string `access:"authentication"`
- LoginButtonColor *string
- LoginButtonBorderColor *string
- LoginButtonTextColor *string
+ LoginButtonColor *string `access:"authentication"`
+ LoginButtonBorderColor *string `access:"authentication"`
+ LoginButtonTextColor *string `access:"authentication"`
- Trace *bool
+ Trace *bool `access:"authentication"`
}
func (s *LdapSettings) SetDefaults() {
@@ -1951,6 +2010,14 @@ func (s *LdapSettings) SetDefaults() {
s.ConnectionSecurity = NewString("")
}
+ if s.PublicCertificateFile == nil {
+ s.PublicCertificateFile = NewString("")
+ }
+
+ if s.PrivateKeyFile == nil {
+ s.PrivateKeyFile = NewString("")
+ }
+
if s.BaseDN == nil {
s.BaseDN = NewString("")
}
@@ -2063,9 +2130,9 @@ func (s *LdapSettings) SetDefaults() {
}
type ComplianceSettings struct {
- Enable *bool
- Directory *string
- EnableDaily *bool
+ Enable *bool `access:"compliance"`
+ Directory *string `access:"compliance"`
+ EnableDaily *bool `access:"compliance"`
}
func (s *ComplianceSettings) SetDefaults() {
@@ -2083,9 +2150,9 @@ func (s *ComplianceSettings) SetDefaults() {
}
type LocalizationSettings struct {
- DefaultServerLocale *string
- DefaultClientLocale *string
- AvailableLocales *string
+ DefaultServerLocale *string `access:"site"`
+ DefaultClientLocale *string `access:"site"`
+ AvailableLocales *string `access:"site"`
}
func (s *LocalizationSettings) SetDefaults() {
@@ -2104,48 +2171,48 @@ func (s *LocalizationSettings) SetDefaults() {
type SamlSettings struct {
// Basic
- Enable *bool
- EnableSyncWithLdap *bool
- EnableSyncWithLdapIncludeAuth *bool
+ Enable *bool `access:"authentication"`
+ EnableSyncWithLdap *bool `access:"authentication"`
+ EnableSyncWithLdapIncludeAuth *bool `access:"authentication"`
- Verify *bool
- Encrypt *bool
- SignRequest *bool
+ Verify *bool `access:"authentication"`
+ Encrypt *bool `access:"authentication"`
+ SignRequest *bool `access:"authentication"`
- IdpUrl *string
- IdpDescriptorUrl *string
- IdpMetadataUrl *string
- ServiceProviderIdentifier *string
- AssertionConsumerServiceURL *string
+ IdpUrl *string `access:"authentication"`
+ IdpDescriptorUrl *string `access:"authentication"`
+ IdpMetadataUrl *string `access:"authentication"`
+ ServiceProviderIdentifier *string `access:"authentication"`
+ AssertionConsumerServiceURL *string `access:"authentication"`
- SignatureAlgorithm *string
- CanonicalAlgorithm *string
+ SignatureAlgorithm *string `access:"authentication"`
+ CanonicalAlgorithm *string `access:"authentication"`
- ScopingIDPProviderId *string
- ScopingIDPName *string
+ ScopingIDPProviderId *string `access:"authentication"`
+ ScopingIDPName *string `access:"authentication"`
- IdpCertificateFile *string
- PublicCertificateFile *string
- PrivateKeyFile *string
+ IdpCertificateFile *string `access:"authentication"`
+ PublicCertificateFile *string `access:"authentication"`
+ PrivateKeyFile *string `access:"authentication"`
// User Mapping
- IdAttribute *string
- GuestAttribute *string
+ IdAttribute *string `access:"authentication"`
+ GuestAttribute *string `access:"authentication"`
EnableAdminAttribute *bool
AdminAttribute *string
- FirstNameAttribute *string
- LastNameAttribute *string
- EmailAttribute *string
- UsernameAttribute *string
- NicknameAttribute *string
- LocaleAttribute *string
- PositionAttribute *string
+ FirstNameAttribute *string `access:"authentication"`
+ LastNameAttribute *string `access:"authentication"`
+ EmailAttribute *string `access:"authentication"`
+ UsernameAttribute *string `access:"authentication"`
+ NicknameAttribute *string `access:"authentication"`
+ LocaleAttribute *string `access:"authentication"`
+ PositionAttribute *string `access:"authentication"`
- LoginButtonText *string
+ LoginButtonText *string `access:"authentication"`
- LoginButtonColor *string
- LoginButtonBorderColor *string
- LoginButtonTextColor *string
+ LoginButtonColor *string `access:"authentication"`
+ LoginButtonBorderColor *string `access:"authentication"`
+ LoginButtonTextColor *string `access:"authentication"`
}
func (s *SamlSettings) SetDefaults() {
@@ -2285,9 +2352,9 @@ func (s *SamlSettings) SetDefaults() {
}
type NativeAppSettings struct {
- AppDownloadLink *string `restricted:"true"`
- AndroidAppDownloadLink *string `restricted:"true"`
- IosAppDownloadLink *string `restricted:"true"`
+ AppDownloadLink *string `access:"site,write_restrictable"`
+ AndroidAppDownloadLink *string `access:"site,write_restrictable"`
+ IosAppDownloadLink *string `access:"site,write_restrictable"`
}
func (s *NativeAppSettings) SetDefaults() {
@@ -2305,27 +2372,27 @@ func (s *NativeAppSettings) SetDefaults() {
}
type ElasticsearchSettings struct {
- ConnectionUrl *string `restricted:"true"`
- Username *string `restricted:"true"`
- Password *string `restricted:"true"`
- EnableIndexing *bool `restricted:"true"`
- EnableSearching *bool `restricted:"true"`
- EnableAutocomplete *bool `restricted:"true"`
- Sniff *bool `restricted:"true"`
- PostIndexReplicas *int `restricted:"true"`
- PostIndexShards *int `restricted:"true"`
- ChannelIndexReplicas *int `restricted:"true"`
- ChannelIndexShards *int `restricted:"true"`
- UserIndexReplicas *int `restricted:"true"`
- UserIndexShards *int `restricted:"true"`
- AggregatePostsAfterDays *int `restricted:"true"`
- PostsAggregatorJobStartTime *string `restricted:"true"`
- IndexPrefix *string `restricted:"true"`
- LiveIndexingBatchSize *int `restricted:"true"`
- BulkIndexingTimeWindowSeconds *int `restricted:"true"`
- RequestTimeoutSeconds *int `restricted:"true"`
- SkipTLSVerification *bool `restricted:"true"`
- Trace *string `restricted:"true"`
+ ConnectionUrl *string `access:"environment,write_restrictable"`
+ Username *string `access:"environment,write_restrictable"`
+ Password *string `access:"environment,write_restrictable"`
+ EnableIndexing *bool `access:"environment,write_restrictable"`
+ EnableSearching *bool `access:"environment,write_restrictable"`
+ EnableAutocomplete *bool `access:"environment,write_restrictable"`
+ Sniff *bool `access:"environment,write_restrictable"`
+ PostIndexReplicas *int `access:"environment,write_restrictable"`
+ PostIndexShards *int `access:"environment,write_restrictable"`
+ ChannelIndexReplicas *int `access:"environment,write_restrictable"`
+ ChannelIndexShards *int `access:"environment,write_restrictable"`
+ UserIndexReplicas *int `access:"environment,write_restrictable"`
+ UserIndexShards *int `access:"environment,write_restrictable"`
+ AggregatePostsAfterDays *int `access:"environment,write_restrictable"`
+ PostsAggregatorJobStartTime *string `access:"environment,write_restrictable"`
+ IndexPrefix *string `access:"environment,write_restrictable"`
+ LiveIndexingBatchSize *int `access:"environment,write_restrictable"`
+ BulkIndexingTimeWindowSeconds *int `access:"environment,write_restrictable"`
+ RequestTimeoutSeconds *int `access:"environment,write_restrictable"`
+ SkipTLSVerification *bool `access:"environment,write_restrictable"`
+ Trace *string `access:"environment,write_restrictable"`
}
func (s *ElasticsearchSettings) SetDefaults() {
@@ -2415,11 +2482,11 @@ func (s *ElasticsearchSettings) SetDefaults() {
}
type BleveSettings struct {
- IndexDir *string
- EnableIndexing *bool
- EnableSearching *bool
- EnableAutocomplete *bool
- BulkIndexingTimeWindowSeconds *int
+ IndexDir *string `access:"experimental"`
+ EnableIndexing *bool `access:"experimental"`
+ EnableSearching *bool `access:"experimental"`
+ EnableAutocomplete *bool `access:"experimental"`
+ BulkIndexingTimeWindowSeconds *int `access:"experimental"`
}
func (bs *BleveSettings) SetDefaults() {
@@ -2445,11 +2512,11 @@ func (bs *BleveSettings) SetDefaults() {
}
type DataRetentionSettings struct {
- EnableMessageDeletion *bool
- EnableFileDeletion *bool
- MessageRetentionDays *int
- FileRetentionDays *int
- DeletionJobStartTime *string
+ EnableMessageDeletion *bool `access:"compliance"`
+ EnableFileDeletion *bool `access:"compliance"`
+ MessageRetentionDays *int `access:"compliance"`
+ FileRetentionDays *int `access:"compliance"`
+ DeletionJobStartTime *string `access:"compliance"`
}
func (s *DataRetentionSettings) SetDefaults() {
@@ -2475,8 +2542,8 @@ func (s *DataRetentionSettings) SetDefaults() {
}
type JobSettings struct {
- RunJobs *bool `restricted:"true"`
- RunScheduler *bool `restricted:"true"`
+ RunJobs *bool `access:"write_restrictable"`
+ RunScheduler *bool `access:"write_restrictable"`
}
func (s *JobSettings) SetDefaults() {
@@ -2494,20 +2561,20 @@ type PluginState struct {
}
type PluginSettings struct {
- Enable *bool
- EnableUploads *bool `restricted:"true"`
- AllowInsecureDownloadUrl *bool `restricted:"true"`
- EnableHealthCheck *bool `restricted:"true"`
- Directory *string `restricted:"true"`
- ClientDirectory *string `restricted:"true"`
- Plugins map[string]map[string]interface{}
- PluginStates map[string]*PluginState
- EnableMarketplace *bool
- EnableRemoteMarketplace *bool
- AutomaticPrepackagedPlugins *bool
- RequirePluginSignature *bool
- MarketplaceUrl *string
- SignaturePublicKeyFiles []string
+ Enable *bool `access:"plugins"`
+ EnableUploads *bool `access:"plugins,write_restrictable"`
+ AllowInsecureDownloadUrl *bool `access:"plugins,write_restrictable"`
+ EnableHealthCheck *bool `access:"plugins,write_restrictable"`
+ Directory *string `access:"plugins,write_restrictable"`
+ ClientDirectory *string `access:"plugins,write_restrictable"`
+ Plugins map[string]map[string]interface{} `access:"plugins"`
+ PluginStates map[string]*PluginState `access:"plugins"`
+ EnableMarketplace *bool `access:"plugins"`
+ EnableRemoteMarketplace *bool `access:"plugins"`
+ AutomaticPrepackagedPlugins *bool `access:"plugins"`
+ RequirePluginSignature *bool `access:"plugins"`
+ MarketplaceUrl *string `access:"plugins"`
+ SignaturePublicKeyFiles []string `access:"plugins"`
}
func (s *PluginSettings) SetDefaults(ls LogSettings) {
@@ -2574,10 +2641,11 @@ func (s *PluginSettings) SetDefaults(ls LogSettings) {
}
type GlobalRelayMessageExportSettings struct {
- CustomerType *string // must be either A9 or A10, dictates SMTP server url
- SmtpUsername *string
- SmtpPassword *string
- EmailAddress *string // the address to send messages to
+ CustomerType *string `access:"compliance"` // must be either A9 or A10, dictates SMTP server url
+ SmtpUsername *string `access:"compliance"`
+ SmtpPassword *string `access:"compliance"`
+ EmailAddress *string `access:"compliance"` // the address to send messages to
+ SMTPServerTimeout *int `access:"compliance"`
}
func (s *GlobalRelayMessageExportSettings) SetDefaults() {
@@ -2593,14 +2661,18 @@ func (s *GlobalRelayMessageExportSettings) SetDefaults() {
if s.EmailAddress == nil {
s.EmailAddress = NewString("")
}
+ if s.SMTPServerTimeout == nil || *s.SMTPServerTimeout == 0 {
+ s.SMTPServerTimeout = NewInt(1800)
+ }
}
type MessageExportSettings struct {
- EnableExport *bool
- ExportFormat *string
- DailyRunTime *string
- ExportFromTimestamp *int64
- BatchSize *int
+ EnableExport *bool `access:"compliance"`
+ ExportFormat *string `access:"compliance"`
+ DailyRunTime *string `access:"compliance"`
+ ExportFromTimestamp *int64 `access:"compliance"`
+ BatchSize *int `access:"compliance"`
+ DownloadExportResults *bool `access:"compliance"`
// formatter-specific settings - these are only expected to be non-nil if ExportFormat is set to the associated format
GlobalRelaySettings *GlobalRelayMessageExportSettings
@@ -2611,6 +2683,10 @@ func (s *MessageExportSettings) SetDefaults() {
s.EnableExport = NewBool(false)
}
+ if s.DownloadExportResults == nil {
+ s.DownloadExportResults = NewBool(false)
+ }
+
if s.ExportFormat == nil {
s.ExportFormat = NewString(COMPLIANCE_EXPORT_TYPE_ACTIANCE)
}
@@ -2634,8 +2710,8 @@ func (s *MessageExportSettings) SetDefaults() {
}
type DisplaySettings struct {
- CustomUrlSchemes []string
- ExperimentalTimezone *bool
+ CustomUrlSchemes []string `access:"site"`
+ ExperimentalTimezone *bool `access:"experimental"`
}
func (s *DisplaySettings) SetDefaults() {
@@ -2645,15 +2721,15 @@ func (s *DisplaySettings) SetDefaults() {
}
if s.ExperimentalTimezone == nil {
- s.ExperimentalTimezone = NewBool(false)
+ s.ExperimentalTimezone = NewBool(true)
}
}
type GuestAccountsSettings struct {
- Enable *bool
- AllowEmailAccounts *bool
- EnforceMultifactorAuthentication *bool
- RestrictCreationToDomains *string
+ Enable *bool `access:"authentication"`
+ AllowEmailAccounts *bool `access:"authentication"`
+ EnforceMultifactorAuthentication *bool `access:"authentication"`
+ RestrictCreationToDomains *string `access:"authentication"`
}
func (s *GuestAccountsSettings) SetDefaults() {
@@ -2675,10 +2751,10 @@ func (s *GuestAccountsSettings) SetDefaults() {
}
type ImageProxySettings struct {
- Enable *bool
- ImageProxyType *string
- RemoteImageProxyURL *string
- RemoteImageProxyOptions *string
+ Enable *bool `access:"environment"`
+ ImageProxyType *string `access:"environment"`
+ RemoteImageProxyURL *string `access:"environment"`
+ RemoteImageProxyOptions *string `access:"environment"`
}
func (s *ImageProxySettings) SetDefaults(ss ServiceSettings) {
@@ -2717,6 +2793,35 @@ func (s *ImageProxySettings) SetDefaults(ss ServiceSettings) {
type ConfigFunc func() *Config
+const ConfigAccessTagWriteRestrictable = "write_restrictable"
+
+// Config fields support the 'access' tag with the following values corresponding to the suffix of the associated
+// PERMISSION_SYSCONSOLE_*_* permission Id: 'about', 'reporting', 'user_management_users',
+// 'user_management_groups', 'user_management_teams', 'user_management_channels',
+// 'user_management_permissions', 'environment', 'site', 'authentication', 'plugins',
+// 'integrations', 'compliance', and 'experimental'. They grant read and/or write access to the config field
+// to roles without PERMISSION_MANAGE_SYSTEM.
+//
+// By default config values can be written with PERMISSION_MANAGE_SYSTEM, but if ExperimentalSettings.RestrictSystemAdmin is true
+// and the access tag contains the value 'write_restrictable', then even PERMISSION_MANAGE_SYSTEM does not grant write access.
+//
+// PERMISSION_MANAGE_SYSTEM always grants read access.
+//
+// Example:
+// type HairSettings struct {
+//      // Colour is writable with either PERMISSION_SYSCONSOLE_WRITE_REPORTING or PERMISSION_SYSCONSOLE_WRITE_USER_MANAGEMENT_GROUPS.
+// // It is readable by PERMISSION_SYSCONSOLE_READ_REPORTING and PERMISSION_SYSCONSOLE_READ_USER_MANAGEMENT_GROUPS permissions.
+// // PERMISSION_MANAGE_SYSTEM grants read and write access.
+// Colour string `access:"reporting,user_management_groups"`
+//
+//
+// // Length is only readable and writable via PERMISSION_MANAGE_SYSTEM.
+// Length string
+//
+//      // Product is only writable by PERMISSION_MANAGE_SYSTEM if ExperimentalSettings.RestrictSystemAdmin is false.
+//      // PERMISSION_MANAGE_SYSTEM can always read the value.
+//      Product bool `access:"write_restrictable"`
+// }
type Config struct {
ServiceSettings ServiceSettings
TeamSettings TeamSettings
@@ -2994,6 +3099,10 @@ func (s *FileSettings) isValid() *AppError {
return NewAppError("Config.IsValid", "model.config.is_valid.file_salt.app_error", nil, "", http.StatusBadRequest)
}
+ if *s.Directory == "" {
+ return NewAppError("Config.IsValid", "model.config.is_valid.directory.app_error", nil, "", http.StatusBadRequest)
+ }
+
return nil
}
@@ -3436,6 +3545,14 @@ func (o *Config) Sanitize() {
*o.GitLabSettings.Secret = FAKE_SETTING
}
+ if o.GoogleSettings.Secret != nil && len(*o.GoogleSettings.Secret) > 0 {
+ *o.GoogleSettings.Secret = FAKE_SETTING
+ }
+
+ if o.Office365Settings.Secret != nil && len(*o.Office365Settings.Secret) > 0 {
+ *o.Office365Settings.Secret = FAKE_SETTING
+ }
+
*o.SqlSettings.DataSource = FAKE_SETTING
*o.SqlSettings.AtRestEncryptKey = FAKE_SETTING
@@ -3448,4 +3565,12 @@ func (o *Config) Sanitize() {
for i := range o.SqlSettings.DataSourceSearchReplicas {
o.SqlSettings.DataSourceSearchReplicas[i] = FAKE_SETTING
}
+
+ if o.MessageExportSettings.GlobalRelaySettings.SmtpPassword != nil && len(*o.MessageExportSettings.GlobalRelaySettings.SmtpPassword) > 0 {
+ *o.MessageExportSettings.GlobalRelaySettings.SmtpPassword = FAKE_SETTING
+ }
+
+ if o.ServiceSettings.GfycatApiSecret != nil && len(*o.ServiceSettings.GfycatApiSecret) > 0 {
+ *o.ServiceSettings.GfycatApiSecret = FAKE_SETTING
+ }
}
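The access annotations added throughout config.go are ordinary Go struct tags, so a permission layer can discover them with the reflect package. Below is a minimal, hypothetical sketch of such a lookup; the helper name hasAccessTag and the trimmed PasswordSettings mirror are illustrative only and are not the server's actual implementation.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// PasswordSettings mirrors the shape of the vendored struct above,
// trimmed to two fields purely for illustration.
type PasswordSettings struct {
	MinimumLength *int  `access:"authentication"`
	Symbol        *bool `access:"authentication"`
}

// hasAccessTag reports whether any field of the given struct carries
// the requested value (e.g. "write_restrictable") in its access tag.
func hasAccessTag(v interface{}, want string) bool {
	t := reflect.TypeOf(v)
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
	}
	for i := 0; i < t.NumField(); i++ {
		for _, val := range strings.Split(t.Field(i).Tag.Get("access"), ",") {
			if val == want {
				return true
			}
		}
	}
	return false
}

func main() {
	fmt.Println(hasAccessTag(PasswordSettings{}, "authentication"))     // true
	fmt.Println(hasAccessTag(PasswordSettings{}, "write_restrictable")) // false
}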
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/file_info.go b/vendor/github.com/mattermost/mattermost-server/v5/model/file_info.go
index 8a3a5cc0..4b71f5a8 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/file_info.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/file_info.go
@@ -4,7 +4,6 @@
package model
import (
- "bytes"
"encoding/json"
"image"
"image/gif"
@@ -151,10 +150,10 @@ func NewInfo(name string) *FileInfo {
return info
}
-func GetInfoForBytes(name string, data []byte) (*FileInfo, *AppError) {
+func GetInfoForBytes(name string, data io.ReadSeeker, size int) (*FileInfo, *AppError) {
info := &FileInfo{
Name: name,
- Size: int64(len(data)),
+ Size: int64(size),
}
var err *AppError
@@ -170,16 +169,17 @@ func GetInfoForBytes(name string, data []byte) (*FileInfo, *AppError) {
if info.IsImage() {
// Only set the width and height if it's actually an image that we can understand
- if config, _, err := image.DecodeConfig(bytes.NewReader(data)); err == nil {
+ if config, _, err := image.DecodeConfig(data); err == nil {
info.Width = config.Width
info.Height = config.Height
if info.MimeType == "image/gif" {
// Just show the gif itself instead of a preview image for animated gifs
- if gifConfig, err := gif.DecodeAll(bytes.NewReader(data)); err != nil {
+ data.Seek(0, io.SeekStart)
+ if gifConfig, err := gif.DecodeAll(data); err != nil {
// Still return the rest of the info even though it doesn't appear to be an actual gif
info.HasPreviewImage = true
- return info, NewAppError("GetInfoForBytes", "model.file_info.get.gif.app_error", nil, "name="+name, http.StatusBadRequest)
+ return info, NewAppError("GetInfoForBytes", "model.file_info.get.gif.app_error", nil, err.Error(), http.StatusBadRequest)
} else {
info.HasPreviewImage = len(gifConfig.Image) == 1
}
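GetInfoForBytes now reads from an io.ReadSeeker instead of buffering a full []byte, so callers pass the size explicitly. A minimal caller sketch, assuming the vendored module path and a plain byte slice as input:

package main

import (
	"bytes"
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	data := []byte("hello world")
	// A bytes.Reader satisfies io.ReadSeeker, so existing []byte
	// callers only need to wrap the slice and pass its length.
	info, appErr := model.GetInfoForBytes("hello.txt", bytes.NewReader(data), len(data))
	if appErr != nil {
		fmt.Println(appErr.Error())
		return
	}
	fmt.Println(info.Name, info.Size, info.MimeType)
}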
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/group.go b/vendor/github.com/mattermost/mattermost-server/v5/model/group.go
index 4de0dcc4..2eda1184 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/group.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/group.go
@@ -94,6 +94,11 @@ type PageOpts struct {
PerPage int
}
+type GroupStats struct {
+ GroupID string `json:"group_id"`
+ TotalMemberCount int64 `json:"total_member_count"`
+}
+
func (group *Group) Patch(patch *GroupPatch) {
if patch.Name != nil {
group.Name = patch.Name
@@ -208,3 +213,9 @@ func GroupPatchFromJson(data io.Reader) *GroupPatch {
json.NewDecoder(data).Decode(&groupPatch)
return groupPatch
}
+
+func GroupStatsFromJson(data io.Reader) *GroupStats {
+ var groupStats *GroupStats
+ json.NewDecoder(data).Decode(&groupStats)
+ return groupStats
+}
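The new GroupStats type and GroupStatsFromJson decoder follow the same FromJson convention as the rest of the model package. A short usage sketch, assuming a response body containing the stats JSON:

package main

import (
	"fmt"
	"strings"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	body := `{"group_id":"g1","total_member_count":42}`
	stats := model.GroupStatsFromJson(strings.NewReader(body))
	fmt.Println(stats.GroupID, stats.TotalMemberCount) // g1 42
}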
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/integrity.go b/vendor/github.com/mattermost/mattermost-server/v5/model/integrity.go
new file mode 100644
index 00000000..744ad07c
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/integrity.go
@@ -0,0 +1,58 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "errors"
+)
+
+type OrphanedRecord struct {
+ ParentId *string `json:"parent_id"`
+ ChildId *string `json:"child_id"`
+}
+
+type RelationalIntegrityCheckData struct {
+ ParentName string `json:"parent_name"`
+ ChildName string `json:"child_name"`
+ ParentIdAttr string `json:"parent_id_attr"`
+ ChildIdAttr string `json:"child_id_attr"`
+ Records []OrphanedRecord `json:"records"`
+}
+
+type IntegrityCheckResult struct {
+ Data interface{} `json:"data"`
+ Err error `json:"err"`
+}
+
+func (r *IntegrityCheckResult) UnmarshalJSON(b []byte) error {
+ var data map[string]interface{}
+ if err := json.Unmarshal(b, &data); err != nil {
+ return err
+ }
+ if d, ok := data["data"]; ok && d != nil {
+ var rdata RelationalIntegrityCheckData
+ m := d.(map[string]interface{})
+ rdata.ParentName = m["parent_name"].(string)
+ rdata.ChildName = m["child_name"].(string)
+ rdata.ParentIdAttr = m["parent_id_attr"].(string)
+ rdata.ChildIdAttr = m["child_id_attr"].(string)
+ for _, recData := range m["records"].([]interface{}) {
+ var record OrphanedRecord
+ m := recData.(map[string]interface{})
+ if val := m["parent_id"]; val != nil {
+ record.ParentId = NewString(val.(string))
+ }
+ if val := m["child_id"]; val != nil {
+ record.ChildId = NewString(val.(string))
+ }
+ rdata.Records = append(rdata.Records, record)
+ }
+ r.Data = rdata
+ }
+ if err, ok := data["err"]; ok && err != nil {
+ r.Err = errors.New(data["err"].(string))
+ }
+ return nil
+}
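IntegrityCheckResult needs the custom UnmarshalJSON above because its Data field is an interface{}: the decoder rebuilds the concrete RelationalIntegrityCheckData and turns a non-null "err" string back into an error value. A minimal decoding sketch with a hand-written payload (illustrative only):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	payload := []byte(`{
		"data": {
			"parent_name": "Channels",
			"child_name": "Posts",
			"parent_id_attr": "Id",
			"child_id_attr": "ChannelId",
			"records": [{"parent_id": null, "child_id": "abc123"}]
		},
		"err": null
	}`)

	var result model.IntegrityCheckResult
	if err := json.Unmarshal(payload, &result); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	// Data now holds a RelationalIntegrityCheckData value, not a map.
	data, ok := result.Data.(model.RelationalIntegrityCheckData)
	if !ok {
		fmt.Println("unexpected data type")
		return
	}
	fmt.Println(data.ParentName, data.ChildName, len(data.Records)) // Channels Posts 1
}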
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/job.go b/vendor/github.com/mattermost/mattermost-server/v5/model/job.go
index e6e1d689..a4bb30a1 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/job.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/job.go
@@ -19,6 +19,9 @@ const (
JOB_TYPE_LDAP_SYNC = "ldap_sync"
JOB_TYPE_MIGRATIONS = "migrations"
JOB_TYPE_PLUGINS = "plugins"
+ JOB_TYPE_EXPIRY_NOTIFY = "expiry_notify"
+ JOB_TYPE_PRODUCT_NOTICES = "product_notices"
+ JOB_TYPE_ACTIVE_USERS = "active_users"
JOB_STATUS_PENDING = "pending"
JOB_STATUS_IN_PROGRESS = "in_progress"
@@ -59,6 +62,9 @@ func (j *Job) IsValid() *AppError {
case JOB_TYPE_MESSAGE_EXPORT:
case JOB_TYPE_MIGRATIONS:
case JOB_TYPE_PLUGINS:
+ case JOB_TYPE_PRODUCT_NOTICES:
+ case JOB_TYPE_EXPIRY_NOTIFY:
+ case JOB_TYPE_ACTIVE_USERS:
default:
return NewAppError("Job.IsValid", "model.job.is_valid.type.app_error", nil, "id="+j.Id, http.StatusBadRequest)
}
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/ldap.go b/vendor/github.com/mattermost/mattermost-server/v5/model/ldap.go
index d5f98f1a..4e19c5b1 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/ldap.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/ldap.go
@@ -4,5 +4,7 @@
package model
const (
- USER_AUTH_SERVICE_LDAP = "ldap"
+ USER_AUTH_SERVICE_LDAP = "ldap"
+ LDAP_PUBIC_CERTIFICATE_NAME = "ldap-public.crt"
+ LDAP_PRIVATE_KEY_NAME = "ldap-private.key"
)
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/license.go b/vendor/github.com/mattermost/mattermost-server/v5/model/license.go
index 0504edc0..3de4aba8 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/license.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/license.go
@@ -13,7 +13,7 @@ const (
EXPIRED_LICENSE_ERROR = "api.license.add_license.expired.app_error"
INVALID_LICENSE_ERROR = "api.license.add_license.invalid.app_error"
LICENSE_GRACE_PERIOD = 1000 * 60 * 60 * 24 * 10 //10 days
- LICENSE_RENEWAL_LINK = "https://licensing.mattermost.com/renew"
+ LICENSE_RENEWAL_LINK = "https://mattermost.com/renew/"
)
type LicenseRecord struct {
@@ -81,6 +81,8 @@ type Features struct {
IDLoadedPushNotifications *bool `json:"id_loaded"`
LockTeammateNameDisplay *bool `json:"lock_teammate_name_display"`
EnterprisePlugins *bool `json:"enterprise_plugins"`
+ AdvancedLogging *bool `json:"advanced_logging"`
+ Cloud *bool `json:"cloud"`
// after we enabled more features we'll need to control them with this
FutureFeatures *bool `json:"future_features"`
@@ -108,6 +110,8 @@ func (f *Features) ToMap() map[string]interface{} {
"id_loaded": *f.IDLoadedPushNotifications,
"lock_teammate_name_display": *f.LockTeammateNameDisplay,
"enterprise_plugins": *f.EnterprisePlugins,
+ "advanced_logging": *f.AdvancedLogging,
+ "cloud": *f.Cloud,
"future": *f.FutureFeatures,
}
}
@@ -212,6 +216,14 @@ func (f *Features) SetDefaults() {
if f.EnterprisePlugins == nil {
f.EnterprisePlugins = NewBool(*f.FutureFeatures)
}
+
+ if f.AdvancedLogging == nil {
+ f.AdvancedLogging = NewBool(*f.FutureFeatures)
+ }
+
+ if f.Cloud == nil {
+ f.Cloud = NewBool(false)
+ }
}
func (l *License) IsExpired() bool {
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/link_metadata.go b/vendor/github.com/mattermost/mattermost-server/v5/model/link_metadata.go
index d20be2cb..6c3e0bd8 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/link_metadata.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/link_metadata.go
@@ -171,9 +171,9 @@ func (o *LinkMetadata) DeserializeDataToConcreteType() error {
// FloorToNearestHour takes a timestamp (in milliseconds) and returns it rounded to the previous hour in UTC.
func FloorToNearestHour(ms int64) int64 {
- t := time.Unix(0, ms*int64(1000*1000))
+ t := time.Unix(0, ms*int64(1000*1000)).UTC()
- return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location()).UnixNano() / int64(time.Millisecond)
+ return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, time.UTC).UnixNano() / int64(time.Millisecond)
}
// isRoundedToNearestHour returns true if the given timestamp (in milliseconds) has been rounded to the nearest hour in UTC.
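Calling .UTC() before truncating makes FloorToNearestHour independent of the server's local time zone; in a zone with a non-whole-hour offset such as UTC+05:30, the old code truncated to a different boundary than the UTC hour. A standalone sketch of the corrected behaviour (the helper name is local to this example):

package main

import (
	"fmt"
	"time"
)

// floorToNearestHour mirrors the corrected logic: convert to UTC first,
// then truncate to the top of the hour and return milliseconds.
func floorToNearestHour(ms int64) int64 {
	t := time.Unix(0, ms*int64(1000*1000)).UTC()
	return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, time.UTC).UnixNano() / int64(time.Millisecond)
}

func main() {
	in := time.Date(2020, 10, 19, 21, 35, 0, 0, time.UTC)
	want := time.Date(2020, 10, 19, 21, 0, 0, 0, time.UTC)
	got := floorToNearestHour(in.UnixNano() / int64(time.Millisecond))
	fmt.Println(got == want.UnixNano()/int64(time.Millisecond)) // true
}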
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/migration.go b/vendor/github.com/mattermost/mattermost-server/v5/model/migration.go
index 7dd08bef..2e7a0f71 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/migration.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/migration.go
@@ -4,6 +4,7 @@
package model
const (
+ ADVANCED_PERMISSIONS_MIGRATION_KEY = "AdvancedPermissionsMigrationComplete"
MIGRATION_KEY_ADVANCED_PERMISSIONS_PHASE_2 = "migration_advanced_permissions_phase_2"
MIGRATION_KEY_EMOJI_PERMISSIONS_SPLIT = "emoji_permissions_split"
@@ -17,4 +18,7 @@ const (
MIGRATION_KEY_ADD_MANAGE_GUESTS_PERMISSIONS = "add_manage_guests_permissions"
MIGRATION_KEY_CHANNEL_MODERATIONS_PERMISSIONS = "channel_moderations_permissions"
MIGRATION_KEY_ADD_USE_GROUP_MENTIONS_PERMISSION = "add_use_group_mentions_permission"
+ MIGRATION_KEY_ADD_SYSTEM_CONSOLE_PERMISSIONS = "add_system_console_permissions"
+ MIGRATION_KEY_SIDEBAR_CATEGORIES_PHASE_2 = "migration_sidebar_categories_phase_2"
+ MIGRATION_KEY_ADD_CONVERT_CHANNEL_PERMISSIONS = "add_convert_channel_permissions"
)
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/outgoing_webhook.go b/vendor/github.com/mattermost/mattermost-server/v5/model/outgoing_webhook.go
index f4278de0..d6cb2138 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/outgoing_webhook.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/outgoing_webhook.go
@@ -112,6 +112,9 @@ func (o *OutgoingWebhookResponse) ToJson() string {
func OutgoingWebhookResponseFromJson(data io.Reader) (*OutgoingWebhookResponse, error) {
var o *OutgoingWebhookResponse
err := json.NewDecoder(data).Decode(&o)
+ if err == io.EOF {
+ return nil, nil
+ }
return o, err
}
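With the io.EOF check, an outgoing webhook that replies with an empty body now decodes to (nil, nil) instead of surfacing the EOF as a decode error. A small sketch of the new behaviour:

package main

import (
	"fmt"
	"strings"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	// An empty response body is now treated as "no response" rather
	// than an error.
	resp, err := model.OutgoingWebhookResponseFromJson(strings.NewReader(""))
	fmt.Println(resp == nil, err == nil) // true true
}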
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/permission.go b/vendor/github.com/mattermost/mattermost-server/v5/model/permission.go
index cc3c5a70..cf05c281 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/permission.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/permission.go
@@ -4,9 +4,9 @@
package model
const (
- PERMISSION_SCOPE_SYSTEM = "system_scope"
- PERMISSION_SCOPE_TEAM = "team_scope"
- PERMISSION_SCOPE_CHANNEL = "channel_scope"
+ PermissionScopeSystem = "system_scope"
+ PermissionScopeTeam = "team_scope"
+ PermissionScopeChannel = "channel_scope"
)
type Permission struct {
@@ -25,6 +25,8 @@ var PERMISSION_CREATE_PUBLIC_CHANNEL *Permission
var PERMISSION_CREATE_PRIVATE_CHANNEL *Permission
var PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS *Permission
var PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS *Permission
+var PERMISSION_CONVERT_PUBLIC_CHANNEL_TO_PRIVATE *Permission
+var PERMISSION_CONVERT_PRIVATE_CHANNEL_TO_PUBLIC *Permission
var PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE *Permission
var PERMISSION_MANAGE_ROLES *Permission
var PERMISSION_MANAGE_TEAM_ROLES *Permission
@@ -43,6 +45,8 @@ var PERMISSION_DELETE_PUBLIC_CHANNEL *Permission
var PERMISSION_DELETE_PRIVATE_CHANNEL *Permission
var PERMISSION_EDIT_OTHER_USERS *Permission
var PERMISSION_READ_CHANNEL *Permission
+var PERMISSION_READ_PUBLIC_CHANNEL_GROUPS *Permission
+var PERMISSION_READ_PRIVATE_CHANNEL_GROUPS *Permission
var PERMISSION_READ_PUBLIC_CHANNEL *Permission
var PERMISSION_ADD_REACTION *Permission
var PERMISSION_REMOVE_REACTION *Permission
@@ -76,6 +80,7 @@ var PERMISSION_MANAGE_TEAM *Permission
var PERMISSION_IMPORT_TEAM *Permission
var PERMISSION_VIEW_TEAM *Permission
var PERMISSION_LIST_USERS_WITHOUT_TEAM *Permission
+var PERMISSION_READ_JOBS *Permission
var PERMISSION_MANAGE_JOBS *Permission
var PERMISSION_CREATE_USER_ACCESS_TOKEN *Permission
var PERMISSION_READ_USER_ACCESS_TOKEN *Permission
@@ -92,537 +97,867 @@ var PERMISSION_PROMOTE_GUEST *Permission
var PERMISSION_DEMOTE_TO_GUEST *Permission
var PERMISSION_USE_CHANNEL_MENTIONS *Permission
var PERMISSION_USE_GROUP_MENTIONS *Permission
+var PERMISSION_READ_OTHER_USERS_TEAMS *Permission
+var PERMISSION_EDIT_BRAND *Permission
+
+var PERMISSION_SYSCONSOLE_READ_ABOUT *Permission
+var PERMISSION_SYSCONSOLE_WRITE_ABOUT *Permission
+
+var PERMISSION_SYSCONSOLE_READ_REPORTING *Permission
+var PERMISSION_SYSCONSOLE_WRITE_REPORTING *Permission
+
+var PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_USERS *Permission
+var PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_USERS *Permission
+
+var PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_GROUPS *Permission
+var PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_GROUPS *Permission
+
+var PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_TEAMS *Permission
+var PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_TEAMS *Permission
+
+var PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_CHANNELS *Permission
+var PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_CHANNELS *Permission
+
+var PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_PERMISSIONS *Permission
+var PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_PERMISSIONS *Permission
+
+var PERMISSION_SYSCONSOLE_READ_ENVIRONMENT *Permission
+var PERMISSION_SYSCONSOLE_WRITE_ENVIRONMENT *Permission
+
+var PERMISSION_SYSCONSOLE_READ_SITE *Permission
+var PERMISSION_SYSCONSOLE_WRITE_SITE *Permission
+
+var PERMISSION_SYSCONSOLE_READ_AUTHENTICATION *Permission
+var PERMISSION_SYSCONSOLE_WRITE_AUTHENTICATION *Permission
+
+var PERMISSION_SYSCONSOLE_READ_PLUGINS *Permission
+var PERMISSION_SYSCONSOLE_WRITE_PLUGINS *Permission
+
+var PERMISSION_SYSCONSOLE_READ_INTEGRATIONS *Permission
+var PERMISSION_SYSCONSOLE_WRITE_INTEGRATIONS *Permission
+
+var PERMISSION_SYSCONSOLE_READ_COMPLIANCE *Permission
+var PERMISSION_SYSCONSOLE_WRITE_COMPLIANCE *Permission
+
+var PERMISSION_SYSCONSOLE_READ_EXPERIMENTAL *Permission
+var PERMISSION_SYSCONSOLE_WRITE_EXPERIMENTAL *Permission
// General permission that encompasses all system admin functions
// in the future this could be broken up to allow access to some
// admin functions but not others
var PERMISSION_MANAGE_SYSTEM *Permission
-var ALL_PERMISSIONS []*Permission
+var AllPermissions []*Permission
+var DeprecatedPermissions []*Permission
-var CHANNEL_MODERATED_PERMISSIONS []string
-var CHANNEL_MODERATED_PERMISSIONS_MAP map[string]string
+var ChannelModeratedPermissions []string
+var ChannelModeratedPermissionsMap map[string]string
+
+var SysconsoleReadPermissions []*Permission
+var SysconsoleWritePermissions []*Permission
func initializePermissions() {
PERMISSION_INVITE_USER = &Permission{
"invite_user",
"authentication.permissions.team_invite_user.name",
"authentication.permissions.team_invite_user.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_ADD_USER_TO_TEAM = &Permission{
"add_user_to_team",
"authentication.permissions.add_user_to_team.name",
"authentication.permissions.add_user_to_team.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_USE_SLASH_COMMANDS = &Permission{
"use_slash_commands",
"authentication.permissions.team_use_slash_commands.name",
"authentication.permissions.team_use_slash_commands.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
}
PERMISSION_MANAGE_SLASH_COMMANDS = &Permission{
"manage_slash_commands",
"authentication.permissions.manage_slash_commands.name",
"authentication.permissions.manage_slash_commands.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS = &Permission{
"manage_others_slash_commands",
"authentication.permissions.manage_others_slash_commands.name",
"authentication.permissions.manage_others_slash_commands.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_CREATE_PUBLIC_CHANNEL = &Permission{
"create_public_channel",
"authentication.permissions.create_public_channel.name",
"authentication.permissions.create_public_channel.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_CREATE_PRIVATE_CHANNEL = &Permission{
"create_private_channel",
"authentication.permissions.create_private_channel.name",
"authentication.permissions.create_private_channel.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS = &Permission{
"manage_public_channel_members",
"authentication.permissions.manage_public_channel_members.name",
"authentication.permissions.manage_public_channel_members.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
}
PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS = &Permission{
"manage_private_channel_members",
"authentication.permissions.manage_private_channel_members.name",
"authentication.permissions.manage_private_channel_members.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
+ }
+ PERMISSION_CONVERT_PUBLIC_CHANNEL_TO_PRIVATE = &Permission{
+ "convert_public_channel_to_private",
+ "authentication.permissions.convert_public_channel_to_private.name",
+ "authentication.permissions.convert_public_channel_to_private.description",
+ PermissionScopeChannel,
+ }
+ PERMISSION_CONVERT_PRIVATE_CHANNEL_TO_PUBLIC = &Permission{
+ "convert_private_channel_to_public",
+ "authentication.permissions.convert_private_channel_to_public.name",
+ "authentication.permissions.convert_private_channel_to_public.description",
+ PermissionScopeChannel,
}
PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE = &Permission{
"assign_system_admin_role",
"authentication.permissions.assign_system_admin_role.name",
"authentication.permissions.assign_system_admin_role.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_MANAGE_ROLES = &Permission{
"manage_roles",
"authentication.permissions.manage_roles.name",
"authentication.permissions.manage_roles.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_MANAGE_TEAM_ROLES = &Permission{
"manage_team_roles",
"authentication.permissions.manage_team_roles.name",
"authentication.permissions.manage_team_roles.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_MANAGE_CHANNEL_ROLES = &Permission{
"manage_channel_roles",
"authentication.permissions.manage_channel_roles.name",
"authentication.permissions.manage_channel_roles.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
}
PERMISSION_MANAGE_SYSTEM = &Permission{
"manage_system",
"authentication.permissions.manage_system.name",
"authentication.permissions.manage_system.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_CREATE_DIRECT_CHANNEL = &Permission{
"create_direct_channel",
"authentication.permissions.create_direct_channel.name",
"authentication.permissions.create_direct_channel.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_CREATE_GROUP_CHANNEL = &Permission{
"create_group_channel",
"authentication.permissions.create_group_channel.name",
"authentication.permissions.create_group_channel.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES = &Permission{
"manage_public_channel_properties",
"authentication.permissions.manage_public_channel_properties.name",
"authentication.permissions.manage_public_channel_properties.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
}
PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES = &Permission{
"manage_private_channel_properties",
"authentication.permissions.manage_private_channel_properties.name",
"authentication.permissions.manage_private_channel_properties.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
}
PERMISSION_LIST_PUBLIC_TEAMS = &Permission{
"list_public_teams",
"authentication.permissions.list_public_teams.name",
"authentication.permissions.list_public_teams.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_JOIN_PUBLIC_TEAMS = &Permission{
"join_public_teams",
"authentication.permissions.join_public_teams.name",
"authentication.permissions.join_public_teams.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_LIST_PRIVATE_TEAMS = &Permission{
"list_private_teams",
"authentication.permissions.list_private_teams.name",
"authentication.permissions.list_private_teams.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_JOIN_PRIVATE_TEAMS = &Permission{
"join_private_teams",
"authentication.permissions.join_private_teams.name",
"authentication.permissions.join_private_teams.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_LIST_TEAM_CHANNELS = &Permission{
"list_team_channels",
"authentication.permissions.list_team_channels.name",
"authentication.permissions.list_team_channels.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_JOIN_PUBLIC_CHANNELS = &Permission{
"join_public_channels",
"authentication.permissions.join_public_channels.name",
"authentication.permissions.join_public_channels.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_DELETE_PUBLIC_CHANNEL = &Permission{
"delete_public_channel",
"authentication.permissions.delete_public_channel.name",
"authentication.permissions.delete_public_channel.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
}
PERMISSION_DELETE_PRIVATE_CHANNEL = &Permission{
"delete_private_channel",
"authentication.permissions.delete_private_channel.name",
"authentication.permissions.delete_private_channel.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
}
PERMISSION_EDIT_OTHER_USERS = &Permission{
"edit_other_users",
"authentication.permissions.edit_other_users.name",
"authentication.permissions.edit_other_users.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_READ_CHANNEL = &Permission{
"read_channel",
"authentication.permissions.read_channel.name",
"authentication.permissions.read_channel.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
+ }
+ PERMISSION_READ_PUBLIC_CHANNEL_GROUPS = &Permission{
+ "read_public_channel_groups",
+ "authentication.permissions.read_public_channel_groups.name",
+ "authentication.permissions.read_public_channel_groups.description",
+ PermissionScopeChannel,
+ }
+ PERMISSION_READ_PRIVATE_CHANNEL_GROUPS = &Permission{
+ "read_private_channel_groups",
+ "authentication.permissions.read_private_channel_groups.name",
+ "authentication.permissions.read_private_channel_groups.description",
+ PermissionScopeChannel,
}
PERMISSION_READ_PUBLIC_CHANNEL = &Permission{
"read_public_channel",
"authentication.permissions.read_public_channel.name",
"authentication.permissions.read_public_channel.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_ADD_REACTION = &Permission{
"add_reaction",
"authentication.permissions.add_reaction.name",
"authentication.permissions.add_reaction.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
}
PERMISSION_REMOVE_REACTION = &Permission{
"remove_reaction",
"authentication.permissions.remove_reaction.name",
"authentication.permissions.remove_reaction.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
}
PERMISSION_REMOVE_OTHERS_REACTIONS = &Permission{
"remove_others_reactions",
"authentication.permissions.remove_others_reactions.name",
"authentication.permissions.remove_others_reactions.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
}
// DEPRECATED
PERMISSION_PERMANENT_DELETE_USER = &Permission{
"permanent_delete_user",
"authentication.permissions.permanent_delete_user.name",
"authentication.permissions.permanent_delete_user.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_UPLOAD_FILE = &Permission{
"upload_file",
"authentication.permissions.upload_file.name",
"authentication.permissions.upload_file.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
}
PERMISSION_GET_PUBLIC_LINK = &Permission{
"get_public_link",
"authentication.permissions.get_public_link.name",
"authentication.permissions.get_public_link.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
// DEPRECATED
PERMISSION_MANAGE_WEBHOOKS = &Permission{
"manage_webhooks",
"authentication.permissions.manage_webhooks.name",
"authentication.permissions.manage_webhooks.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
// DEPRECATED
PERMISSION_MANAGE_OTHERS_WEBHOOKS = &Permission{
"manage_others_webhooks",
"authentication.permissions.manage_others_webhooks.name",
"authentication.permissions.manage_others_webhooks.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_MANAGE_INCOMING_WEBHOOKS = &Permission{
"manage_incoming_webhooks",
"authentication.permissions.manage_incoming_webhooks.name",
"authentication.permissions.manage_incoming_webhooks.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_MANAGE_OUTGOING_WEBHOOKS = &Permission{
"manage_outgoing_webhooks",
"authentication.permissions.manage_outgoing_webhooks.name",
"authentication.permissions.manage_outgoing_webhooks.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_MANAGE_OTHERS_INCOMING_WEBHOOKS = &Permission{
"manage_others_incoming_webhooks",
"authentication.permissions.manage_others_incoming_webhooks.name",
"authentication.permissions.manage_others_incoming_webhooks.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_MANAGE_OTHERS_OUTGOING_WEBHOOKS = &Permission{
"manage_others_outgoing_webhooks",
"authentication.permissions.manage_others_outgoing_webhooks.name",
"authentication.permissions.manage_others_outgoing_webhooks.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_MANAGE_OAUTH = &Permission{
"manage_oauth",
"authentication.permissions.manage_oauth.name",
"authentication.permissions.manage_oauth.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH = &Permission{
"manage_system_wide_oauth",
"authentication.permissions.manage_system_wide_oauth.name",
"authentication.permissions.manage_system_wide_oauth.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
// DEPRECATED
PERMISSION_MANAGE_EMOJIS = &Permission{
"manage_emojis",
"authentication.permissions.manage_emojis.name",
"authentication.permissions.manage_emojis.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
// DEPRECATED
PERMISSION_MANAGE_OTHERS_EMOJIS = &Permission{
"manage_others_emojis",
"authentication.permissions.manage_others_emojis.name",
"authentication.permissions.manage_others_emojis.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_CREATE_EMOJIS = &Permission{
"create_emojis",
"authentication.permissions.create_emojis.name",
"authentication.permissions.create_emojis.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_DELETE_EMOJIS = &Permission{
"delete_emojis",
"authentication.permissions.delete_emojis.name",
"authentication.permissions.delete_emojis.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_DELETE_OTHERS_EMOJIS = &Permission{
"delete_others_emojis",
"authentication.permissions.delete_others_emojis.name",
"authentication.permissions.delete_others_emojis.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_CREATE_POST = &Permission{
"create_post",
"authentication.permissions.create_post.name",
"authentication.permissions.create_post.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
}
PERMISSION_CREATE_POST_PUBLIC = &Permission{
"create_post_public",
"authentication.permissions.create_post_public.name",
"authentication.permissions.create_post_public.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
}
PERMISSION_CREATE_POST_EPHEMERAL = &Permission{
"create_post_ephemeral",
"authentication.permissions.create_post_ephemeral.name",
"authentication.permissions.create_post_ephemeral.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
}
PERMISSION_EDIT_POST = &Permission{
"edit_post",
"authentication.permissions.edit_post.name",
"authentication.permissions.edit_post.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
}
PERMISSION_EDIT_OTHERS_POSTS = &Permission{
"edit_others_posts",
"authentication.permissions.edit_others_posts.name",
"authentication.permissions.edit_others_posts.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
}
PERMISSION_DELETE_POST = &Permission{
"delete_post",
"authentication.permissions.delete_post.name",
"authentication.permissions.delete_post.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
}
PERMISSION_DELETE_OTHERS_POSTS = &Permission{
"delete_others_posts",
"authentication.permissions.delete_others_posts.name",
"authentication.permissions.delete_others_posts.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
}
PERMISSION_REMOVE_USER_FROM_TEAM = &Permission{
"remove_user_from_team",
"authentication.permissions.remove_user_from_team.name",
"authentication.permissions.remove_user_from_team.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_CREATE_TEAM = &Permission{
"create_team",
"authentication.permissions.create_team.name",
"authentication.permissions.create_team.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_MANAGE_TEAM = &Permission{
"manage_team",
"authentication.permissions.manage_team.name",
"authentication.permissions.manage_team.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_IMPORT_TEAM = &Permission{
"import_team",
"authentication.permissions.import_team.name",
"authentication.permissions.import_team.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_VIEW_TEAM = &Permission{
"view_team",
"authentication.permissions.view_team.name",
"authentication.permissions.view_team.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_LIST_USERS_WITHOUT_TEAM = &Permission{
"list_users_without_team",
"authentication.permissions.list_users_without_team.name",
"authentication.permissions.list_users_without_team.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_CREATE_USER_ACCESS_TOKEN = &Permission{
"create_user_access_token",
"authentication.permissions.create_user_access_token.name",
"authentication.permissions.create_user_access_token.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_READ_USER_ACCESS_TOKEN = &Permission{
"read_user_access_token",
"authentication.permissions.read_user_access_token.name",
"authentication.permissions.read_user_access_token.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_REVOKE_USER_ACCESS_TOKEN = &Permission{
"revoke_user_access_token",
"authentication.permissions.revoke_user_access_token.name",
"authentication.permissions.revoke_user_access_token.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_CREATE_BOT = &Permission{
"create_bot",
"authentication.permissions.create_bot.name",
"authentication.permissions.create_bot.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_ASSIGN_BOT = &Permission{
"assign_bot",
"authentication.permissions.assign_bot.name",
"authentication.permissions.assign_bot.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_READ_BOTS = &Permission{
"read_bots",
"authentication.permissions.read_bots.name",
"authentication.permissions.read_bots.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_READ_OTHERS_BOTS = &Permission{
"read_others_bots",
"authentication.permissions.read_others_bots.name",
"authentication.permissions.read_others_bots.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_MANAGE_BOTS = &Permission{
"manage_bots",
"authentication.permissions.manage_bots.name",
"authentication.permissions.manage_bots.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_MANAGE_OTHERS_BOTS = &Permission{
"manage_others_bots",
"authentication.permissions.manage_others_bots.name",
"authentication.permissions.manage_others_bots.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
+ }
+ PERMISSION_READ_JOBS = &Permission{
+ "read_jobs",
+ "authentication.permisssions.read_jobs.name",
+ "authentication.permisssions.read_jobs.description",
+ PermissionScopeSystem,
}
PERMISSION_MANAGE_JOBS = &Permission{
"manage_jobs",
"authentication.permisssions.manage_jobs.name",
"authentication.permisssions.manage_jobs.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
PERMISSION_VIEW_MEMBERS = &Permission{
"view_members",
"authentication.permisssions.view_members.name",
"authentication.permisssions.view_members.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_INVITE_GUEST = &Permission{
"invite_guest",
"authentication.permissions.invite_guest.name",
"authentication.permissions.invite_guest.description",
- PERMISSION_SCOPE_TEAM,
+ PermissionScopeTeam,
}
PERMISSION_PROMOTE_GUEST = &Permission{
"promote_guest",
"authentication.permissions.promote_guest.name",
"authentication.permissions.promote_guest.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
-
PERMISSION_DEMOTE_TO_GUEST = &Permission{
"demote_to_guest",
"authentication.permissions.demote_to_guest.name",
"authentication.permissions.demote_to_guest.description",
- PERMISSION_SCOPE_SYSTEM,
+ PermissionScopeSystem,
}
-
PERMISSION_USE_CHANNEL_MENTIONS = &Permission{
"use_channel_mentions",
"authentication.permissions.use_channel_mentions.name",
"authentication.permissions.use_channel_mentions.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
}
-
PERMISSION_USE_GROUP_MENTIONS = &Permission{
"use_group_mentions",
"authentication.permissions.use_group_mentions.name",
"authentication.permissions.use_group_mentions.description",
- PERMISSION_SCOPE_CHANNEL,
+ PermissionScopeChannel,
+ }
+ PERMISSION_READ_OTHER_USERS_TEAMS = &Permission{
+ "read_other_users_teams",
+ "authentication.permissions.read_other_users_teams.name",
+ "authentication.permissions.read_other_users_teams.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_EDIT_BRAND = &Permission{
+ "edit_brand",
+ "authentication.permissions.edit_brand.name",
+ "authentication.permissions.edit_brand.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_READ_ABOUT = &Permission{
+ "sysconsole_read_about",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_WRITE_ABOUT = &Permission{
+ "sysconsole_write_about",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_READ_REPORTING = &Permission{
+ "sysconsole_read_reporting",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_WRITE_REPORTING = &Permission{
+ "sysconsole_write_reporting",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_USERS = &Permission{
+ "sysconsole_read_user_management_users",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_USERS = &Permission{
+ "sysconsole_write_user_management_users",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_GROUPS = &Permission{
+ "sysconsole_read_user_management_groups",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_GROUPS = &Permission{
+ "sysconsole_write_user_management_groups",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_TEAMS = &Permission{
+ "sysconsole_read_user_management_teams",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_TEAMS = &Permission{
+ "sysconsole_write_user_management_teams",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_CHANNELS = &Permission{
+ "sysconsole_read_user_management_channels",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_CHANNELS = &Permission{
+ "sysconsole_write_user_management_channels",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_PERMISSIONS = &Permission{
+ "sysconsole_read_user_management_permissions",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_PERMISSIONS = &Permission{
+ "sysconsole_write_user_management_permissions",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_READ_ENVIRONMENT = &Permission{
+ "sysconsole_read_environment",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_WRITE_ENVIRONMENT = &Permission{
+ "sysconsole_write_environment",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_READ_SITE = &Permission{
+ "sysconsole_read_site",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_WRITE_SITE = &Permission{
+ "sysconsole_write_site",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_READ_AUTHENTICATION = &Permission{
+ "sysconsole_read_authentication",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_WRITE_AUTHENTICATION = &Permission{
+ "sysconsole_write_authentication",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_READ_PLUGINS = &Permission{
+ "sysconsole_read_plugins",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_WRITE_PLUGINS = &Permission{
+ "sysconsole_write_plugins",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_READ_INTEGRATIONS = &Permission{
+ "sysconsole_read_integrations",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_WRITE_INTEGRATIONS = &Permission{
+ "sysconsole_write_integrations",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_READ_COMPLIANCE = &Permission{
+ "sysconsole_read_compliance",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_WRITE_COMPLIANCE = &Permission{
+ "sysconsole_write_compliance",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_READ_PLUGINS = &Permission{
+ "sysconsole_read_plugins",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_WRITE_PLUGINS = &Permission{
+ "sysconsole_write_plugins",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_READ_EXPERIMENTAL = &Permission{
+ "sysconsole_read_experimental",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
+ }
+ PERMISSION_SYSCONSOLE_WRITE_EXPERIMENTAL = &Permission{
+ "sysconsole_write_experimental",
+ "authentication.permissions.use_group_mentions.name",
+ "authentication.permissions.use_group_mentions.description",
+ PermissionScopeSystem,
}
- ALL_PERMISSIONS = []*Permission{
- PERMISSION_INVITE_USER,
- PERMISSION_ADD_USER_TO_TEAM,
- PERMISSION_USE_SLASH_COMMANDS,
- PERMISSION_MANAGE_SLASH_COMMANDS,
- PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS,
- PERMISSION_CREATE_PUBLIC_CHANNEL,
- PERMISSION_CREATE_PRIVATE_CHANNEL,
- PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS,
- PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS,
+ SysconsoleReadPermissions = []*Permission{
+ PERMISSION_SYSCONSOLE_READ_ABOUT,
+ PERMISSION_SYSCONSOLE_READ_REPORTING,
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_USERS,
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_GROUPS,
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_TEAMS,
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_CHANNELS,
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_PERMISSIONS,
+ PERMISSION_SYSCONSOLE_READ_ENVIRONMENT,
+ PERMISSION_SYSCONSOLE_READ_SITE,
+ PERMISSION_SYSCONSOLE_READ_AUTHENTICATION,
+ PERMISSION_SYSCONSOLE_READ_PLUGINS,
+ PERMISSION_SYSCONSOLE_READ_INTEGRATIONS,
+ PERMISSION_SYSCONSOLE_READ_COMPLIANCE,
+ PERMISSION_SYSCONSOLE_READ_EXPERIMENTAL,
+ }
+
+ SysconsoleWritePermissions = []*Permission{
+ PERMISSION_SYSCONSOLE_WRITE_ABOUT,
+ PERMISSION_SYSCONSOLE_WRITE_REPORTING,
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_USERS,
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_GROUPS,
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_TEAMS,
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_CHANNELS,
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_PERMISSIONS,
+ PERMISSION_SYSCONSOLE_WRITE_ENVIRONMENT,
+ PERMISSION_SYSCONSOLE_WRITE_SITE,
+ PERMISSION_SYSCONSOLE_WRITE_AUTHENTICATION,
+ PERMISSION_SYSCONSOLE_WRITE_PLUGINS,
+ PERMISSION_SYSCONSOLE_WRITE_INTEGRATIONS,
+ PERMISSION_SYSCONSOLE_WRITE_COMPLIANCE,
+ PERMISSION_SYSCONSOLE_WRITE_EXPERIMENTAL,
+ }
+
+ SystemScopedPermissionsMinusSysconsole := []*Permission{
PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE,
PERMISSION_MANAGE_ROLES,
- PERMISSION_MANAGE_TEAM_ROLES,
- PERMISSION_MANAGE_CHANNEL_ROLES,
+ PERMISSION_MANAGE_SYSTEM,
PERMISSION_CREATE_DIRECT_CHANNEL,
PERMISSION_CREATE_GROUP_CHANNEL,
- PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES,
- PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES,
PERMISSION_LIST_PUBLIC_TEAMS,
PERMISSION_JOIN_PUBLIC_TEAMS,
PERMISSION_LIST_PRIVATE_TEAMS,
PERMISSION_JOIN_PRIVATE_TEAMS,
+ PERMISSION_EDIT_OTHER_USERS,
+ PERMISSION_READ_OTHER_USERS_TEAMS,
+ PERMISSION_GET_PUBLIC_LINK,
+ PERMISSION_MANAGE_OAUTH,
+ PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH,
+ PERMISSION_CREATE_TEAM,
+ PERMISSION_LIST_USERS_WITHOUT_TEAM,
+ PERMISSION_CREATE_USER_ACCESS_TOKEN,
+ PERMISSION_READ_USER_ACCESS_TOKEN,
+ PERMISSION_REVOKE_USER_ACCESS_TOKEN,
+ PERMISSION_CREATE_BOT,
+ PERMISSION_ASSIGN_BOT,
+ PERMISSION_READ_BOTS,
+ PERMISSION_READ_OTHERS_BOTS,
+ PERMISSION_MANAGE_BOTS,
+ PERMISSION_MANAGE_OTHERS_BOTS,
+ PERMISSION_READ_JOBS,
+ PERMISSION_MANAGE_JOBS,
+ PERMISSION_PROMOTE_GUEST,
+ PERMISSION_DEMOTE_TO_GUEST,
+ PERMISSION_EDIT_BRAND,
+ }
+
+ TeamScopedPermissions := []*Permission{
+ PERMISSION_INVITE_USER,
+ PERMISSION_ADD_USER_TO_TEAM,
+ PERMISSION_MANAGE_SLASH_COMMANDS,
+ PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS,
+ PERMISSION_CREATE_PUBLIC_CHANNEL,
+ PERMISSION_CREATE_PRIVATE_CHANNEL,
+ PERMISSION_MANAGE_TEAM_ROLES,
PERMISSION_LIST_TEAM_CHANNELS,
PERMISSION_JOIN_PUBLIC_CHANNELS,
- PERMISSION_DELETE_PUBLIC_CHANNEL,
- PERMISSION_DELETE_PRIVATE_CHANNEL,
- PERMISSION_EDIT_OTHER_USERS,
- PERMISSION_READ_CHANNEL,
PERMISSION_READ_PUBLIC_CHANNEL,
- PERMISSION_ADD_REACTION,
- PERMISSION_REMOVE_REACTION,
- PERMISSION_REMOVE_OTHERS_REACTIONS,
- PERMISSION_PERMANENT_DELETE_USER,
- PERMISSION_UPLOAD_FILE,
- PERMISSION_GET_PUBLIC_LINK,
- PERMISSION_MANAGE_WEBHOOKS,
- PERMISSION_MANAGE_OTHERS_WEBHOOKS,
PERMISSION_MANAGE_INCOMING_WEBHOOKS,
PERMISSION_MANAGE_OUTGOING_WEBHOOKS,
PERMISSION_MANAGE_OTHERS_INCOMING_WEBHOOKS,
PERMISSION_MANAGE_OTHERS_OUTGOING_WEBHOOKS,
- PERMISSION_MANAGE_OAUTH,
- PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH,
- PERMISSION_MANAGE_EMOJIS,
- PERMISSION_MANAGE_OTHERS_EMOJIS,
PERMISSION_CREATE_EMOJIS,
PERMISSION_DELETE_EMOJIS,
PERMISSION_DELETE_OTHERS_EMOJIS,
+ PERMISSION_REMOVE_USER_FROM_TEAM,
+ PERMISSION_MANAGE_TEAM,
+ PERMISSION_IMPORT_TEAM,
+ PERMISSION_VIEW_TEAM,
+ PERMISSION_VIEW_MEMBERS,
+ PERMISSION_INVITE_GUEST,
+ }
+
+ ChannelScopedPermissions := []*Permission{
+ PERMISSION_USE_SLASH_COMMANDS,
+ PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS,
+ PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS,
+ PERMISSION_MANAGE_CHANNEL_ROLES,
+ PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES,
+ PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES,
+ PERMISSION_CONVERT_PUBLIC_CHANNEL_TO_PRIVATE,
+ PERMISSION_CONVERT_PRIVATE_CHANNEL_TO_PUBLIC,
+ PERMISSION_DELETE_PUBLIC_CHANNEL,
+ PERMISSION_DELETE_PRIVATE_CHANNEL,
+ PERMISSION_READ_CHANNEL,
+ PERMISSION_READ_PUBLIC_CHANNEL_GROUPS,
+ PERMISSION_READ_PRIVATE_CHANNEL_GROUPS,
+ PERMISSION_ADD_REACTION,
+ PERMISSION_REMOVE_REACTION,
+ PERMISSION_REMOVE_OTHERS_REACTIONS,
+ PERMISSION_UPLOAD_FILE,
PERMISSION_CREATE_POST,
PERMISSION_CREATE_POST_PUBLIC,
PERMISSION_CREATE_POST_EPHEMERAL,
@@ -630,44 +965,39 @@ func initializePermissions() {
PERMISSION_EDIT_OTHERS_POSTS,
PERMISSION_DELETE_POST,
PERMISSION_DELETE_OTHERS_POSTS,
- PERMISSION_REMOVE_USER_FROM_TEAM,
- PERMISSION_CREATE_TEAM,
- PERMISSION_MANAGE_TEAM,
- PERMISSION_IMPORT_TEAM,
- PERMISSION_VIEW_TEAM,
- PERMISSION_LIST_USERS_WITHOUT_TEAM,
- PERMISSION_MANAGE_JOBS,
- PERMISSION_CREATE_USER_ACCESS_TOKEN,
- PERMISSION_READ_USER_ACCESS_TOKEN,
- PERMISSION_REVOKE_USER_ACCESS_TOKEN,
- PERMISSION_CREATE_BOT,
- PERMISSION_READ_BOTS,
- PERMISSION_READ_OTHERS_BOTS,
- PERMISSION_MANAGE_BOTS,
- PERMISSION_MANAGE_OTHERS_BOTS,
- PERMISSION_MANAGE_SYSTEM,
- PERMISSION_VIEW_MEMBERS,
- PERMISSION_INVITE_GUEST,
- PERMISSION_PROMOTE_GUEST,
- PERMISSION_DEMOTE_TO_GUEST,
PERMISSION_USE_CHANNEL_MENTIONS,
PERMISSION_USE_GROUP_MENTIONS,
}
- CHANNEL_MODERATED_PERMISSIONS = []string{
+ DeprecatedPermissions = []*Permission{
+ PERMISSION_PERMANENT_DELETE_USER,
+ PERMISSION_MANAGE_WEBHOOKS,
+ PERMISSION_MANAGE_OTHERS_WEBHOOKS,
+ PERMISSION_MANAGE_EMOJIS,
+ PERMISSION_MANAGE_OTHERS_EMOJIS,
+ }
+
+ AllPermissions = []*Permission{}
+ AllPermissions = append(AllPermissions, SystemScopedPermissionsMinusSysconsole...)
+ AllPermissions = append(AllPermissions, TeamScopedPermissions...)
+ AllPermissions = append(AllPermissions, ChannelScopedPermissions...)
+ AllPermissions = append(AllPermissions, SysconsoleReadPermissions...)
+ AllPermissions = append(AllPermissions, SysconsoleWritePermissions...)
+
+ ChannelModeratedPermissions = []string{
PERMISSION_CREATE_POST.Id,
"create_reactions",
"manage_members",
PERMISSION_USE_CHANNEL_MENTIONS.Id,
}
- CHANNEL_MODERATED_PERMISSIONS_MAP = map[string]string{
- PERMISSION_CREATE_POST.Id: CHANNEL_MODERATED_PERMISSIONS[0],
- PERMISSION_ADD_REACTION.Id: CHANNEL_MODERATED_PERMISSIONS[1],
- PERMISSION_REMOVE_REACTION.Id: CHANNEL_MODERATED_PERMISSIONS[1],
- PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id: CHANNEL_MODERATED_PERMISSIONS[2],
- PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS.Id: CHANNEL_MODERATED_PERMISSIONS[2],
- PERMISSION_USE_CHANNEL_MENTIONS.Id: CHANNEL_MODERATED_PERMISSIONS[3],
+ ChannelModeratedPermissionsMap = map[string]string{
+ PERMISSION_CREATE_POST.Id: ChannelModeratedPermissions[0],
+ PERMISSION_ADD_REACTION.Id: ChannelModeratedPermissions[1],
+ PERMISSION_REMOVE_REACTION.Id: ChannelModeratedPermissions[1],
+ PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id: ChannelModeratedPermissions[2],
+ PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS.Id: ChannelModeratedPermissions[2],
+ PERMISSION_USE_CHANNEL_MENTIONS.Id: ChannelModeratedPermissions[3],
}
}
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/post.go b/vendor/github.com/mattermost/mattermost-server/v5/model/post.go
index 817ca08a..6e29ba3e 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/post.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/post.go
@@ -64,6 +64,7 @@ const (
POST_PROPS_MENTION_HIGHLIGHT_DISABLED = "mentionHighlightDisabled"
POST_PROPS_GROUP_HIGHLIGHT_DISABLED = "disable_group_highlight"
+ POST_SYSTEM_WARN_METRIC_STATUS = "warn_metric_status"
)
var AT_MENTION_PATTEN = regexp.MustCompile(`\B@`)
@@ -312,7 +313,8 @@ func (o *Post) IsValid(maxPostSize int) *AppError {
POST_CHANNEL_RESTORED,
POST_CHANGE_CHANNEL_PRIVACY,
POST_ME,
- POST_ADD_BOT_TEAMS_CHANNELS:
+ POST_ADD_BOT_TEAMS_CHANNELS,
+ POST_SYSTEM_WARN_METRIC_STATUS:
default:
if !strings.HasPrefix(o.Type, POST_CUSTOM_TYPE_PREFIX) {
return NewAppError("Post.IsValid", "model.post.is_valid.type.app_error", nil, "id="+o.Type, http.StatusBadRequest)
@@ -495,15 +497,14 @@ func (o *SearchParameter) SearchParameterToJson() string {
return string(b)
}
-func SearchParameterFromJson(data io.Reader) *SearchParameter {
+func SearchParameterFromJson(data io.Reader) (*SearchParameter, error) {
decoder := json.NewDecoder(data)
var searchParam SearchParameter
- err := decoder.Decode(&searchParam)
- if err != nil {
- return nil
+ if err := decoder.Decode(&searchParam); err != nil {
+ return nil, err
}
- return &searchParam
+ return &searchParam, nil
}
func (o *Post) ChannelMentions() []string {
@@ -521,6 +522,9 @@ func (o *Post) DisableMentionHighlights() string {
// DisableMentionHighlights disables mention highlighting for a post patch if required.
func (o *PostPatch) DisableMentionHighlights() {
+ if o.Message == nil {
+ return
+ }
if _, hasMentions := findAtChannelMention(*o.Message); hasMentions {
if o.Props == nil {
o.Props = &StringInterface{}
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/preference.go b/vendor/github.com/mattermost/mattermost-server/v5/model/preference.go
index 346f88f8..e752bb54 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/preference.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/preference.go
@@ -14,6 +14,7 @@ import (
const (
PREFERENCE_CATEGORY_DIRECT_CHANNEL_SHOW = "direct_channel_show"
+ PREFERENCE_CATEGORY_GROUP_CHANNEL_SHOW = "group_channel_show"
PREFERENCE_CATEGORY_TUTORIAL_STEPS = "tutorial_step"
PREFERENCE_CATEGORY_ADVANCED_SETTINGS = "advanced_settings"
PREFERENCE_CATEGORY_FLAGGED_POST = "flagged_post"
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/product_notices.go b/vendor/github.com/mattermost/mattermost-server/v5/model/product_notices.go
new file mode 100644
index 00000000..6aa88fc7
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/product_notices.go
@@ -0,0 +1,213 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "github.com/pkg/errors"
+ "io"
+)
+
+type ProductNotices []ProductNotice
+
+func (r *ProductNotices) Marshal() ([]byte, error) {
+ return json.Marshal(r)
+}
+
+func UnmarshalProductNotices(data []byte) (ProductNotices, error) {
+ var r ProductNotices
+ err := json.Unmarshal(data, &r)
+ return r, err
+}
+
+// List of product notices. Order is important and is used to resolve priorities.
+// Each notice will only be shown if conditions are met.
+type ProductNotice struct {
+ Conditions Conditions `json:"conditions"`
+ ID string `json:"id"` // Unique identifier for this notice. Can be a running number. Used for storing 'viewed'; state on the server.
+ LocalizedMessages map[string]NoticeMessageInternal `json:"localizedMessages"` // Notice message data, organized by locale.; Example:; "localizedMessages": {; "en": { "title": "English", description: "English description"},; "frFR": { "title": "Frances", description: "French description"}; }
+ Repeatable *bool `json:"repeatable,omitempty"` // Configurable flag if the notice should reappear after it’s seen and dismissed
+}
+
+func (n *ProductNotice) SysAdminOnly() bool {
+ return n.Conditions.Audience != nil && *n.Conditions.Audience == NoticeAudience_Sysadmin
+}
+
+func (n *ProductNotice) TeamAdminOnly() bool {
+ return n.Conditions.Audience != nil && *n.Conditions.Audience == NoticeAudience_TeamAdmin
+}
+
+type Conditions struct {
+ Audience *NoticeAudience `json:"audience,omitempty"`
+ ClientType *NoticeClientType `json:"clientType,omitempty"` // Only show the notice on specific clients. Defaults to 'all'
+ DesktopVersion []string `json:"desktopVersion,omitempty"` // What desktop client versions does this notice apply to.; Format: semver ranges (https://devhints.io/semver); Example: [">=1.2.3 < ~2.4.x"]; Example: ["<v5.19", "v5.20-v5.22"]
+ DisplayDate *string `json:"displayDate,omitempty"` // When to display the notice.; Examples:; "2020-03-01T00:00:00Z" - show on specified date; ">= 2020-03-01T00:00:00Z" - show after specified date; "< 2020-03-01T00:00:00Z" - show before the specified date; "> 2020-03-01T00:00:00Z <= 2020-04-01T00:00:00Z" - show only between the specified dates
+ InstanceType *NoticeInstanceType `json:"instanceType,omitempty"`
+ MobileVersion []string `json:"mobileVersion,omitempty"` // What mobile client versions does this notice apply to.; Format: semver ranges (https://devhints.io/semver); Example: [">=1.2.3 < ~2.4.x"]; Example: ["<v5.19", "v5.20-v5.22"]
+ NumberOfPosts *int64 `json:"numberOfPosts,omitempty"` // Only show the notice when server has more than specified number of posts
+ NumberOfUsers *int64 `json:"numberOfUsers,omitempty"` // Only show the notice when server has more than specified number of users
+ ServerConfig map[string]interface{} `json:"serverConfig,omitempty"` // Map of mattermost server config paths and their values. Notice will be displayed only if; the values match the target server config; Example: serverConfig: { "PluginSettings.Enable": true, "GuestAccountsSettings.Enable":; false }
+ ServerVersion []string `json:"serverVersion,omitempty"` // What server versions does this notice apply to.; Format: semver ranges (https://devhints.io/semver); Example: [">=1.2.3 < ~2.4.x"]; Example: ["<v5.19", "v5.20-v5.22"]
+ Sku *NoticeSKU `json:"sku,omitempty"`
+ UserConfig map[string]interface{} `json:"userConfig,omitempty"` // Map of user's settings and their values. Notice will be displayed only if the values; match the viewing users' config; Example: userConfig: { "new_sidebar.disabled": true }
+}
+
+type NoticeMessageInternal struct {
+ Action *NoticeAction `json:"action,omitempty"` // Optional action to perform on action button click. (defaults to closing the notice)
+ ActionParam *string `json:"actionParam,omitempty"` // Optional action parameter.; Example: {"action": "url", actionParam: "/console/some-page"}
+ ActionText *string `json:"actionText,omitempty"` // Optional override for the action button text (defaults to OK)
+ Description string `json:"description"` // Notice content. Use {{Mattermost}} instead of plain text to support white-labeling. Text; supports Markdown.
+ Image *string `json:"image,omitempty"`
+ Title string `json:"title"` // Notice title. Use {{Mattermost}} instead of plain text to support white-labeling. Text; supports Markdown.
+}
+type NoticeMessages []NoticeMessage
+
+type NoticeMessage struct {
+ NoticeMessageInternal
+ ID string `json:"id"`
+ SysAdminOnly bool `json:"sysAdminOnly"`
+ TeamAdminOnly bool `json:"teamAdminOnly"`
+}
+
+func (r *NoticeMessages) Marshal() ([]byte, error) {
+ return json.Marshal(r)
+}
+
+func UnmarshalProductNoticeMessages(data io.Reader) (NoticeMessages, error) {
+ var r NoticeMessages
+ err := json.NewDecoder(data).Decode(&r)
+ return r, err
+}
+
+// User role, i.e. who will see the notice. Defaults to "all"
+type NoticeAudience string
+
+func NewNoticeAudience(s NoticeAudience) *NoticeAudience {
+ return &s
+}
+
+func (a *NoticeAudience) Matches(sysAdmin bool, teamAdmin bool) bool {
+ switch *a {
+ case NoticeAudience_All:
+ return true
+ case NoticeAudience_Member:
+ return !sysAdmin && !teamAdmin
+ case NoticeAudience_Sysadmin:
+ return sysAdmin
+ case NoticeAudience_TeamAdmin:
+ return teamAdmin
+ }
+ return false
+}
+
+const (
+ NoticeAudience_All NoticeAudience = "all"
+ NoticeAudience_Member NoticeAudience = "member"
+ NoticeAudience_Sysadmin NoticeAudience = "sysadmin"
+ NoticeAudience_TeamAdmin NoticeAudience = "teamadmin"
+)
+
+// Only show the notice on specific clients. Defaults to 'all'
+//
+// Client type. Defaults to "all"
+type NoticeClientType string
+
+func NewNoticeClientType(s NoticeClientType) *NoticeClientType { return &s }
+
+func (c *NoticeClientType) Matches(other NoticeClientType) bool {
+ switch *c {
+ case NoticeClientType_All:
+ return true
+ case NoticeClientType_Mobile:
+ return other == NoticeClientType_MobileIos || other == NoticeClientType_MobileAndroid
+ default:
+ return *c == other
+ }
+}
+
+const (
+ NoticeClientType_All NoticeClientType = "all"
+ NoticeClientType_Desktop NoticeClientType = "desktop"
+ NoticeClientType_Mobile NoticeClientType = "mobile"
+ NoticeClientType_MobileAndroid NoticeClientType = "mobile-android"
+ NoticeClientType_MobileIos NoticeClientType = "mobile-ios"
+ NoticeClientType_Web NoticeClientType = "web"
+)
+
+func NoticeClientTypeFromString(s string) (NoticeClientType, error) {
+ switch s {
+ case "web":
+ return NoticeClientType_Web, nil
+ case "mobile-ios":
+ return NoticeClientType_MobileIos, nil
+ case "mobile-android":
+ return NoticeClientType_MobileAndroid, nil
+ case "desktop":
+ return NoticeClientType_Desktop, nil
+ }
+ return NoticeClientType_All, errors.New("Invalid client type supplied")
+}
+
+// Instance type. Defaults to "both"
+type NoticeInstanceType string
+
+func NewNoticeInstanceType(n NoticeInstanceType) *NoticeInstanceType { return &n }
+func (t *NoticeInstanceType) Matches(isCloud bool) bool {
+ if *t == NoticeInstanceType_Both {
+ return true
+ }
+ if *t == NoticeInstanceType_Cloud && !isCloud {
+ return false
+ }
+ if *t == NoticeInstanceType_OnPrem && isCloud {
+ return false
+ }
+ return true
+}
+
+const (
+ NoticeInstanceType_Both NoticeInstanceType = "both"
+ NoticeInstanceType_Cloud NoticeInstanceType = "cloud"
+ NoticeInstanceType_OnPrem NoticeInstanceType = "onprem"
+)
+
+// SKU. Defaults to "all"
+type NoticeSKU string
+
+func NewNoticeSKU(s NoticeSKU) *NoticeSKU { return &s }
+func (c *NoticeSKU) Matches(s string) bool {
+ switch *c {
+ case NoticeSKU_All:
+ return true
+ case NoticeSKU_E0, NoticeSKU_Team:
+ return s == ""
+ default:
+ return s == string(*c)
+ }
+}
+
+const (
+ NoticeSKU_E0 NoticeSKU = "e0"
+ NoticeSKU_E10 NoticeSKU = "e10"
+ NoticeSKU_E20 NoticeSKU = "e20"
+ NoticeSKU_All NoticeSKU = "all"
+ NoticeSKU_Team NoticeSKU = "team"
+)
+
+// Optional action to perform on action button click. (defaults to closing the notice)
+//
+// Possible actions to execute on button press
+type NoticeAction string
+
+const (
+ URL NoticeAction = "url"
+)
+
+// Definition of the table keeping the 'viewed' state of each in-product notice per user
+type ProductNoticeViewState struct {
+ UserId string
+ NoticeId string
+ Viewed int32
+ Timestamp int64
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/push_notification.go b/vendor/github.com/mattermost/mattermost-server/v5/model/push_notification.go
index 5b0118ce..80945905 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/push_notification.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/push_notification.go
@@ -19,6 +19,7 @@ const (
PUSH_TYPE_MESSAGE = "message"
PUSH_TYPE_CLEAR = "clear"
PUSH_TYPE_UPDATE_BADGE = "update_badge"
+ PUSH_TYPE_SESSION = "session"
PUSH_MESSAGE_V2 = "v2"
PUSH_SOUND_NONE = "none"
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/role.go b/vendor/github.com/mattermost/mattermost-server/v5/model/role.go
index 38ac1ef7..e880a1d8 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/role.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/role.go
@@ -9,10 +9,24 @@ import (
"strings"
)
+// SysconsoleAncillaryPermissions maps the non-sysconsole permissions required by each sysconsole view.
+var SysconsoleAncillaryPermissions map[string][]*Permission
+var SystemManagerDefaultPermissions []string
+var SystemUserManagerDefaultPermissions []string
+var SystemReadOnlyAdminDefaultPermissions []string
+
var BuiltInSchemeManagedRoleIDs []string
+var NewSystemRoleIDs []string
+
func init() {
- BuiltInSchemeManagedRoleIDs = []string{
+ NewSystemRoleIDs = []string{
+ SYSTEM_USER_MANAGER_ROLE_ID,
+ SYSTEM_READ_ONLY_ADMIN_ROLE_ID,
+ SYSTEM_MANAGER_ROLE_ID,
+ }
+
+ BuiltInSchemeManagedRoleIDs = append([]string{
SYSTEM_GUEST_ROLE_ID,
SYSTEM_USER_ROLE_ID,
SYSTEM_ADMIN_ROLE_ID,
@@ -29,7 +43,125 @@ func init() {
CHANNEL_GUEST_ROLE_ID,
CHANNEL_USER_ROLE_ID,
CHANNEL_ADMIN_ROLE_ID,
+ }, NewSystemRoleIDs...)
+
+ // When updating the values here, the values in mattermost-redux must also be updated.
+ SysconsoleAncillaryPermissions = map[string][]*Permission{
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_CHANNELS.Id: {
+ PERMISSION_READ_PUBLIC_CHANNEL,
+ PERMISSION_READ_CHANNEL,
+ PERMISSION_READ_PUBLIC_CHANNEL_GROUPS,
+ PERMISSION_READ_PRIVATE_CHANNEL_GROUPS,
+ },
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_USERS.Id: {
+ PERMISSION_READ_OTHER_USERS_TEAMS,
+ },
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_TEAMS.Id: {
+ PERMISSION_LIST_PRIVATE_TEAMS,
+ PERMISSION_LIST_PUBLIC_TEAMS,
+ PERMISSION_VIEW_TEAM,
+ },
+ PERMISSION_SYSCONSOLE_READ_ENVIRONMENT.Id: {
+ PERMISSION_READ_JOBS,
+ },
+ PERMISSION_SYSCONSOLE_READ_AUTHENTICATION.Id: {
+ PERMISSION_READ_JOBS,
+ },
+ PERMISSION_SYSCONSOLE_READ_REPORTING.Id: {
+ PERMISSION_VIEW_TEAM,
+ },
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_USERS.Id: {
+ PERMISSION_EDIT_OTHER_USERS,
+ PERMISSION_DEMOTE_TO_GUEST,
+ PERMISSION_PROMOTE_GUEST,
+ },
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_CHANNELS.Id: {
+ PERMISSION_MANAGE_TEAM,
+ PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES,
+ PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES,
+ PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS,
+ PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS,
+ PERMISSION_DELETE_PRIVATE_CHANNEL,
+ PERMISSION_DELETE_PUBLIC_CHANNEL,
+ PERMISSION_MANAGE_CHANNEL_ROLES,
+ PERMISSION_CONVERT_PUBLIC_CHANNEL_TO_PRIVATE,
+ PERMISSION_CONVERT_PRIVATE_CHANNEL_TO_PUBLIC,
+ },
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_TEAMS.Id: {
+ PERMISSION_MANAGE_TEAM,
+ PERMISSION_MANAGE_TEAM_ROLES,
+ PERMISSION_REMOVE_USER_FROM_TEAM,
+ PERMISSION_JOIN_PRIVATE_TEAMS,
+ PERMISSION_JOIN_PUBLIC_TEAMS,
+ PERMISSION_ADD_USER_TO_TEAM,
+ },
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_GROUPS.Id: {
+ PERMISSION_MANAGE_TEAM,
+ PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS,
+ PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS,
+ PERMISSION_CONVERT_PUBLIC_CHANNEL_TO_PRIVATE,
+ PERMISSION_CONVERT_PRIVATE_CHANNEL_TO_PUBLIC,
+ },
+ PERMISSION_SYSCONSOLE_WRITE_ENVIRONMENT.Id: {
+ PERMISSION_MANAGE_JOBS,
+ },
+ PERMISSION_SYSCONSOLE_WRITE_SITE.Id: {
+ PERMISSION_EDIT_BRAND,
+ },
}
+
+ SystemUserManagerDefaultPermissions = []string{
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_GROUPS.Id,
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_TEAMS.Id,
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_CHANNELS.Id,
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_PERMISSIONS.Id,
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_GROUPS.Id,
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_TEAMS.Id,
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_CHANNELS.Id,
+ PERMISSION_SYSCONSOLE_READ_AUTHENTICATION.Id,
+ }
+
+ SystemReadOnlyAdminDefaultPermissions = []string{
+ PERMISSION_SYSCONSOLE_READ_ABOUT.Id,
+ PERMISSION_SYSCONSOLE_READ_REPORTING.Id,
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_USERS.Id,
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_GROUPS.Id,
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_TEAMS.Id,
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_CHANNELS.Id,
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_PERMISSIONS.Id,
+ PERMISSION_SYSCONSOLE_READ_ENVIRONMENT.Id,
+ PERMISSION_SYSCONSOLE_READ_SITE.Id,
+ PERMISSION_SYSCONSOLE_READ_AUTHENTICATION.Id,
+ PERMISSION_SYSCONSOLE_READ_PLUGINS.Id,
+ PERMISSION_SYSCONSOLE_READ_INTEGRATIONS.Id,
+ PERMISSION_SYSCONSOLE_READ_EXPERIMENTAL.Id,
+ }
+
+ SystemManagerDefaultPermissions = []string{
+ PERMISSION_SYSCONSOLE_READ_ABOUT.Id,
+ PERMISSION_SYSCONSOLE_READ_REPORTING.Id,
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_GROUPS.Id,
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_TEAMS.Id,
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_CHANNELS.Id,
+ PERMISSION_SYSCONSOLE_READ_USERMANAGEMENT_PERMISSIONS.Id,
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_GROUPS.Id,
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_TEAMS.Id,
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_CHANNELS.Id,
+ PERMISSION_SYSCONSOLE_WRITE_USERMANAGEMENT_PERMISSIONS.Id,
+ PERMISSION_SYSCONSOLE_READ_ENVIRONMENT.Id,
+ PERMISSION_SYSCONSOLE_WRITE_ENVIRONMENT.Id,
+ PERMISSION_SYSCONSOLE_READ_SITE.Id,
+ PERMISSION_SYSCONSOLE_WRITE_SITE.Id,
+ PERMISSION_SYSCONSOLE_READ_AUTHENTICATION.Id,
+ PERMISSION_SYSCONSOLE_READ_PLUGINS.Id,
+ PERMISSION_SYSCONSOLE_READ_INTEGRATIONS.Id,
+ PERMISSION_SYSCONSOLE_WRITE_INTEGRATIONS.Id,
+ }
+
+ // Add the ancillary permissions to each system role
+ SystemUserManagerDefaultPermissions = addAncillaryPermissions(SystemUserManagerDefaultPermissions)
+ SystemReadOnlyAdminDefaultPermissions = addAncillaryPermissions(SystemReadOnlyAdminDefaultPermissions)
+ SystemManagerDefaultPermissions = addAncillaryPermissions(SystemManagerDefaultPermissions)
}
type RoleType string
@@ -42,6 +174,9 @@ const (
SYSTEM_POST_ALL_ROLE_ID = "system_post_all"
SYSTEM_POST_ALL_PUBLIC_ROLE_ID = "system_post_all_public"
SYSTEM_USER_ACCESS_TOKEN_ROLE_ID = "system_user_access_token"
+ SYSTEM_USER_MANAGER_ROLE_ID = "system_user_manager"
+ SYSTEM_READ_ONLY_ADMIN_ROLE_ID = "system_read_only_admin"
+ SYSTEM_MANAGER_ROLE_ID = "system_manager"
TEAM_GUEST_ROLE_ID = "team_guest"
TEAM_USER_ROLE_ID = "team_user"
@@ -135,8 +270,8 @@ func (r *Role) MergeChannelHigherScopedPermissions(higherScopedPermissions *Role
higherScopedPermissionsMap := AsStringBoolMap(higherScopedPermissions.Permissions)
rolePermissionsMap := AsStringBoolMap(r.Permissions)
- for _, cp := range ALL_PERMISSIONS {
- if cp.Scope != PERMISSION_SCOPE_CHANNEL {
+ for _, cp := range AllPermissions {
+ if cp.Scope != PermissionScopeChannel {
continue
}
@@ -150,7 +285,7 @@ func (r *Role) MergeChannelHigherScopedPermissions(higherScopedPermissions *Role
continue
}
- _, permissionIsModerated := CHANNEL_MODERATED_PERMISSIONS_MAP[cp.Id]
+ _, permissionIsModerated := ChannelModeratedPermissionsMap[cp.Id]
if permissionIsModerated {
_, presentOnRole := rolePermissionsMap[cp.Id]
if presentOnRole && presentOnHigherScope {
@@ -216,13 +351,13 @@ func ChannelModeratedPermissionsChangedByPatch(role *Role, patch *RolePatch) []s
patchMap := make(map[string]bool)
for _, permission := range role.Permissions {
- if channelModeratedPermissionName, found := CHANNEL_MODERATED_PERMISSIONS_MAP[permission]; found {
+ if channelModeratedPermissionName, found := ChannelModeratedPermissionsMap[permission]; found {
roleMap[channelModeratedPermissionName] = true
}
}
for _, permission := range *patch.Permissions {
- if channelModeratedPermissionName, found := CHANNEL_MODERATED_PERMISSIONS_MAP[permission]; found {
+ if channelModeratedPermissionName, found := ChannelModeratedPermissionsMap[permission]; found {
patchMap[channelModeratedPermissionName] = true
}
}
@@ -246,11 +381,11 @@ func ChannelModeratedPermissionsChangedByPatch(role *Role, patch *RolePatch) []s
func (r *Role) GetChannelModeratedPermissions(channelType string) map[string]bool {
moderatedPermissions := make(map[string]bool)
for _, permission := range r.Permissions {
- if _, found := CHANNEL_MODERATED_PERMISSIONS_MAP[permission]; !found {
+ if _, found := ChannelModeratedPermissionsMap[permission]; !found {
continue
}
- for moderated, moderatedPermissionValue := range CHANNEL_MODERATED_PERMISSIONS_MAP {
+ for moderated, moderatedPermissionValue := range ChannelModeratedPermissionsMap {
// the moderated permission has already been found to be true so skip this iteration
if moderatedPermissions[moderatedPermissionValue] {
continue
@@ -279,14 +414,14 @@ func (r *Role) RolePatchFromChannelModerationsPatch(channelModerationsPatch []*C
// Iterate through the list of existing permissions on the role and append permissions that we want to keep.
for _, permission := range r.Permissions {
// Permission is not moderated so dont add it to the patch and skip the channelModerationsPatch
- if _, isModerated := CHANNEL_MODERATED_PERMISSIONS_MAP[permission]; !isModerated {
+ if _, isModerated := ChannelModeratedPermissionsMap[permission]; !isModerated {
continue
}
permissionEnabled := true
// Check if permission has a matching moderated permission name inside the channel moderation patch
for _, channelModerationPatch := range channelModerationsPatch {
- if *channelModerationPatch.Name == CHANNEL_MODERATED_PERMISSIONS_MAP[permission] {
+ if *channelModerationPatch.Name == ChannelModeratedPermissionsMap[permission] {
// Permission key exists in patch with a value of false so skip over it
if roleName == "members" {
if channelModerationPatch.Roles.Members != nil && !*channelModerationPatch.Roles.Members {
@@ -307,7 +442,7 @@ func (r *Role) RolePatchFromChannelModerationsPatch(channelModerationsPatch []*C
// Iterate through the patch and add any permissions that dont already exist on the role
for _, channelModerationPatch := range channelModerationsPatch {
- for permission, moderatedPermissionName := range CHANNEL_MODERATED_PERMISSIONS_MAP {
+ for permission, moderatedPermissionName := range ChannelModeratedPermissionsMap {
if roleName == "members" && channelModerationPatch.Roles.Members != nil && *channelModerationPatch.Roles.Members && *channelModerationPatch.Name == moderatedPermissionName {
permissionsToAddToPatch[permission] = true
}
@@ -349,7 +484,7 @@ func (r *Role) IsValidWithoutId() bool {
for _, permission := range r.Permissions {
permissionValidated := false
- for _, p := range ALL_PERMISSIONS {
+ for _, p := range append(AllPermissions, DeprecatedPermissions...) {
if permission == p.Id {
permissionValidated = true
break
@@ -364,6 +499,23 @@ func (r *Role) IsValidWithoutId() bool {
return true
}
+func CleanRoleNames(roleNames []string) ([]string, bool) {
+ var cleanedRoleNames []string
+ for _, roleName := range roleNames {
+ if strings.TrimSpace(roleName) == "" {
+ continue
+ }
+
+ if !IsValidRoleName(roleName) {
+ return roleNames, false
+ }
+
+ cleanedRoleNames = append(cleanedRoleNames, roleName)
+ }
+
+ return cleanedRoleNames, true
+}
+
func IsValidRoleName(roleName string) bool {
if len(roleName) <= 0 || len(roleName) > ROLE_NAME_MAX_LENGTH {
return false
@@ -493,6 +645,8 @@ func MakeDefaultRoles() map[string]*Role {
PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS.Id,
PERMISSION_MANAGE_INCOMING_WEBHOOKS.Id,
PERMISSION_MANAGE_OUTGOING_WEBHOOKS.Id,
+ PERMISSION_CONVERT_PUBLIC_CHANNEL_TO_PRIVATE.Id,
+ PERMISSION_CONVERT_PRIVATE_CHANNEL_TO_PUBLIC.Id,
},
SchemeManaged: true,
BuiltIn: true,
@@ -562,6 +716,38 @@ func MakeDefaultRoles() map[string]*Role {
BuiltIn: true,
}
+ roles[SYSTEM_USER_MANAGER_ROLE_ID] = &Role{
+ Name: "system_user_manager",
+ DisplayName: "authentication.roles.system_user_manager.name",
+ Description: "authentication.roles.system_user_manager.description",
+ Permissions: SystemUserManagerDefaultPermissions,
+ SchemeManaged: false,
+ BuiltIn: true,
+ }
+
+ roles[SYSTEM_READ_ONLY_ADMIN_ROLE_ID] = &Role{
+ Name: "system_read_only_admin",
+ DisplayName: "authentication.roles.system_read_only_admin.name",
+ Description: "authentication.roles.system_read_only_admin.description",
+ Permissions: SystemReadOnlyAdminDefaultPermissions,
+ SchemeManaged: false,
+ BuiltIn: true,
+ }
+
+ roles[SYSTEM_MANAGER_ROLE_ID] = &Role{
+ Name: "system_manager",
+ DisplayName: "authentication.roles.system_manager.name",
+ Description: "authentication.roles.system_manager.description",
+ Permissions: SystemManagerDefaultPermissions,
+ SchemeManaged: false,
+ BuiltIn: true,
+ }
+
+ allPermissionIDs := []string{}
+ for _, permission := range AllPermissions {
+ allPermissionIDs = append(allPermissionIDs, permission.Id)
+ }
+
roles[SYSTEM_ADMIN_ROLE_ID] = &Role{
Name: "system_admin",
DisplayName: "authentication.roles.global_admin.name",
@@ -569,64 +755,21 @@ func MakeDefaultRoles() map[string]*Role {
// System admins can do anything channel and team admins can do
// plus everything members of teams and channels can do to all teams
// and channels on the system
- Permissions: append(
- append(
- append(
- append(
- []string{
- PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE.Id,
- PERMISSION_MANAGE_SYSTEM.Id,
- PERMISSION_MANAGE_ROLES.Id,
- PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES.Id,
- PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id,
- PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS.Id,
- PERMISSION_DELETE_PUBLIC_CHANNEL.Id,
- PERMISSION_CREATE_PUBLIC_CHANNEL.Id,
- PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES.Id,
- PERMISSION_DELETE_PRIVATE_CHANNEL.Id,
- PERMISSION_CREATE_PRIVATE_CHANNEL.Id,
- PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH.Id,
- PERMISSION_MANAGE_OTHERS_INCOMING_WEBHOOKS.Id,
- PERMISSION_MANAGE_OTHERS_OUTGOING_WEBHOOKS.Id,
- PERMISSION_EDIT_OTHER_USERS.Id,
- PERMISSION_EDIT_OTHERS_POSTS.Id,
- PERMISSION_MANAGE_OAUTH.Id,
- PERMISSION_INVITE_USER.Id,
- PERMISSION_INVITE_GUEST.Id,
- PERMISSION_PROMOTE_GUEST.Id,
- PERMISSION_DEMOTE_TO_GUEST.Id,
- PERMISSION_DELETE_POST.Id,
- PERMISSION_DELETE_OTHERS_POSTS.Id,
- PERMISSION_CREATE_TEAM.Id,
- PERMISSION_ADD_USER_TO_TEAM.Id,
- PERMISSION_LIST_USERS_WITHOUT_TEAM.Id,
- PERMISSION_MANAGE_JOBS.Id,
- PERMISSION_CREATE_POST_PUBLIC.Id,
- PERMISSION_CREATE_POST_EPHEMERAL.Id,
- PERMISSION_CREATE_USER_ACCESS_TOKEN.Id,
- PERMISSION_READ_USER_ACCESS_TOKEN.Id,
- PERMISSION_REVOKE_USER_ACCESS_TOKEN.Id,
- PERMISSION_CREATE_BOT.Id,
- PERMISSION_READ_BOTS.Id,
- PERMISSION_READ_OTHERS_BOTS.Id,
- PERMISSION_MANAGE_BOTS.Id,
- PERMISSION_MANAGE_OTHERS_BOTS.Id,
- PERMISSION_REMOVE_OTHERS_REACTIONS.Id,
- PERMISSION_LIST_PRIVATE_TEAMS.Id,
- PERMISSION_JOIN_PRIVATE_TEAMS.Id,
- PERMISSION_VIEW_MEMBERS.Id,
- },
- roles[TEAM_USER_ROLE_ID].Permissions...,
- ),
- roles[CHANNEL_USER_ROLE_ID].Permissions...,
- ),
- roles[TEAM_ADMIN_ROLE_ID].Permissions...,
- ),
- roles[CHANNEL_ADMIN_ROLE_ID].Permissions...,
- ),
+ Permissions: allPermissionIDs,
SchemeManaged: true,
BuiltIn: true,
}
return roles
}
+
+func addAncillaryPermissions(permissions []string) []string {
+ for _, permission := range permissions {
+ if ancillaryPermissions, ok := SysconsoleAncillaryPermissions[permission]; ok {
+ for _, ancillaryPermission := range ancillaryPermissions {
+ permissions = append(permissions, ancillaryPermission.Id)
+ }
+ }
+ }
+ return permissions
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/saml.go b/vendor/github.com/mattermost/mattermost-server/v5/model/saml.go
index 59ac2acc..feaf325a 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/saml.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/saml.go
@@ -15,6 +15,7 @@ const (
USER_AUTH_SERVICE_SAML_TEXT = "SAML"
USER_AUTH_SERVICE_IS_SAML = "isSaml"
USER_AUTH_SERVICE_IS_MOBILE = "isMobile"
+ USER_AUTH_SERVICE_IS_OAUTH = "isOAuthUser"
)
type SamlAuthRequest struct {
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/search_params.go b/vendor/github.com/mattermost/mattermost-server/v5/model/search_params.go
index e6dce73c..d34c8865 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/search_params.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/search_params.go
@@ -4,6 +4,7 @@
package model
import (
+ "net/http"
"regexp"
"strings"
"time"
@@ -367,3 +368,13 @@ func ParseSearchParams(text string, timeZoneOffset int) []*SearchParams {
return paramsList
}
+
+func IsSearchParamsListValid(paramsList []*SearchParams) *AppError {
+ // All SearchParams should have the same IncludeDeletedChannels value.
+ for _, params := range paramsList {
+ if params.IncludeDeletedChannels != paramsList[0].IncludeDeletedChannels {
+ return NewAppError("IsSearchParamsListValid", "model.search_params_list.is_valid.include_deleted_channels.app_error", nil, "", http.StatusInternalServerError)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/serialized_gen.go b/vendor/github.com/mattermost/mattermost-server/v5/model/serialized_gen.go
new file mode 100644
index 00000000..c64d88de
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/serialized_gen.go
@@ -0,0 +1,1622 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package model
+
+// Code generated by github.com/tinylib/msgp DO NOT EDIT.
+
+import (
+ "github.com/tinylib/msgp/msgp"
+)
+
+// DecodeMsg implements msgp.Decodable
+func (z *Session) DecodeMsg(dc *msgp.Reader) (err error) {
+ var zb0001 uint32
+ zb0001, err = dc.ReadArrayHeader()
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 != 13 {
+ err = msgp.ArrayError{Wanted: 13, Got: zb0001}
+ return
+ }
+ z.Id, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "Id")
+ return
+ }
+ z.Token, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "Token")
+ return
+ }
+ z.CreateAt, err = dc.ReadInt64()
+ if err != nil {
+ err = msgp.WrapError(err, "CreateAt")
+ return
+ }
+ z.ExpiresAt, err = dc.ReadInt64()
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiresAt")
+ return
+ }
+ z.LastActivityAt, err = dc.ReadInt64()
+ if err != nil {
+ err = msgp.WrapError(err, "LastActivityAt")
+ return
+ }
+ z.UserId, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "UserId")
+ return
+ }
+ z.DeviceId, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "DeviceId")
+ return
+ }
+ z.Roles, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "Roles")
+ return
+ }
+ z.IsOAuth, err = dc.ReadBool()
+ if err != nil {
+ err = msgp.WrapError(err, "IsOAuth")
+ return
+ }
+ z.ExpiredNotify, err = dc.ReadBool()
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredNotify")
+ return
+ }
+ var zb0002 uint32
+ zb0002, err = dc.ReadMapHeader()
+ if err != nil {
+ err = msgp.WrapError(err, "Props")
+ return
+ }
+ if z.Props == nil {
+ z.Props = make(StringMap, zb0002)
+ } else if len(z.Props) > 0 {
+ for key := range z.Props {
+ delete(z.Props, key)
+ }
+ }
+ for zb0002 > 0 {
+ zb0002--
+ var za0001 string
+ var za0002 string
+ za0001, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "Props")
+ return
+ }
+ za0002, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "Props", za0001)
+ return
+ }
+ z.Props[za0001] = za0002
+ }
+ var zb0003 uint32
+ zb0003, err = dc.ReadArrayHeader()
+ if err != nil {
+ err = msgp.WrapError(err, "TeamMembers")
+ return
+ }
+ if cap(z.TeamMembers) >= int(zb0003) {
+ z.TeamMembers = (z.TeamMembers)[:zb0003]
+ } else {
+ z.TeamMembers = make([]*TeamMember, zb0003)
+ }
+ for za0003 := range z.TeamMembers {
+ if dc.IsNil() {
+ err = dc.ReadNil()
+ if err != nil {
+ err = msgp.WrapError(err, "TeamMembers", za0003)
+ return
+ }
+ z.TeamMembers[za0003] = nil
+ } else {
+ if z.TeamMembers[za0003] == nil {
+ z.TeamMembers[za0003] = new(TeamMember)
+ }
+ err = z.TeamMembers[za0003].DecodeMsg(dc)
+ if err != nil {
+ err = msgp.WrapError(err, "TeamMembers", za0003)
+ return
+ }
+ }
+ }
+ z.Local, err = dc.ReadBool()
+ if err != nil {
+ err = msgp.WrapError(err, "Local")
+ return
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *Session) EncodeMsg(en *msgp.Writer) (err error) {
+ // array header, size 13
+ err = en.Append(0x9d)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.Id)
+ if err != nil {
+ err = msgp.WrapError(err, "Id")
+ return
+ }
+ err = en.WriteString(z.Token)
+ if err != nil {
+ err = msgp.WrapError(err, "Token")
+ return
+ }
+ err = en.WriteInt64(z.CreateAt)
+ if err != nil {
+ err = msgp.WrapError(err, "CreateAt")
+ return
+ }
+ err = en.WriteInt64(z.ExpiresAt)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiresAt")
+ return
+ }
+ err = en.WriteInt64(z.LastActivityAt)
+ if err != nil {
+ err = msgp.WrapError(err, "LastActivityAt")
+ return
+ }
+ err = en.WriteString(z.UserId)
+ if err != nil {
+ err = msgp.WrapError(err, "UserId")
+ return
+ }
+ err = en.WriteString(z.DeviceId)
+ if err != nil {
+ err = msgp.WrapError(err, "DeviceId")
+ return
+ }
+ err = en.WriteString(z.Roles)
+ if err != nil {
+ err = msgp.WrapError(err, "Roles")
+ return
+ }
+ err = en.WriteBool(z.IsOAuth)
+ if err != nil {
+ err = msgp.WrapError(err, "IsOAuth")
+ return
+ }
+ err = en.WriteBool(z.ExpiredNotify)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredNotify")
+ return
+ }
+ err = en.WriteMapHeader(uint32(len(z.Props)))
+ if err != nil {
+ err = msgp.WrapError(err, "Props")
+ return
+ }
+ for za0001, za0002 := range z.Props {
+ err = en.WriteString(za0001)
+ if err != nil {
+ err = msgp.WrapError(err, "Props")
+ return
+ }
+ err = en.WriteString(za0002)
+ if err != nil {
+ err = msgp.WrapError(err, "Props", za0001)
+ return
+ }
+ }
+ err = en.WriteArrayHeader(uint32(len(z.TeamMembers)))
+ if err != nil {
+ err = msgp.WrapError(err, "TeamMembers")
+ return
+ }
+ for za0003 := range z.TeamMembers {
+ if z.TeamMembers[za0003] == nil {
+ err = en.WriteNil()
+ if err != nil {
+ return
+ }
+ } else {
+ err = z.TeamMembers[za0003].EncodeMsg(en)
+ if err != nil {
+ err = msgp.WrapError(err, "TeamMembers", za0003)
+ return
+ }
+ }
+ }
+ err = en.WriteBool(z.Local)
+ if err != nil {
+ err = msgp.WrapError(err, "Local")
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *Session) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // array header, size 13
+ o = append(o, 0x9d)
+ o = msgp.AppendString(o, z.Id)
+ o = msgp.AppendString(o, z.Token)
+ o = msgp.AppendInt64(o, z.CreateAt)
+ o = msgp.AppendInt64(o, z.ExpiresAt)
+ o = msgp.AppendInt64(o, z.LastActivityAt)
+ o = msgp.AppendString(o, z.UserId)
+ o = msgp.AppendString(o, z.DeviceId)
+ o = msgp.AppendString(o, z.Roles)
+ o = msgp.AppendBool(o, z.IsOAuth)
+ o = msgp.AppendBool(o, z.ExpiredNotify)
+ o = msgp.AppendMapHeader(o, uint32(len(z.Props)))
+ for za0001, za0002 := range z.Props {
+ o = msgp.AppendString(o, za0001)
+ o = msgp.AppendString(o, za0002)
+ }
+ o = msgp.AppendArrayHeader(o, uint32(len(z.TeamMembers)))
+ for za0003 := range z.TeamMembers {
+ if z.TeamMembers[za0003] == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o, err = z.TeamMembers[za0003].MarshalMsg(o)
+ if err != nil {
+ err = msgp.WrapError(err, "TeamMembers", za0003)
+ return
+ }
+ }
+ }
+ o = msgp.AppendBool(o, z.Local)
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *Session) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var zb0001 uint32
+ zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 != 13 {
+ err = msgp.ArrayError{Wanted: 13, Got: zb0001}
+ return
+ }
+ z.Id, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Id")
+ return
+ }
+ z.Token, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Token")
+ return
+ }
+ z.CreateAt, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "CreateAt")
+ return
+ }
+ z.ExpiresAt, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiresAt")
+ return
+ }
+ z.LastActivityAt, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "LastActivityAt")
+ return
+ }
+ z.UserId, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "UserId")
+ return
+ }
+ z.DeviceId, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "DeviceId")
+ return
+ }
+ z.Roles, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Roles")
+ return
+ }
+ z.IsOAuth, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "IsOAuth")
+ return
+ }
+ z.ExpiredNotify, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpiredNotify")
+ return
+ }
+ var zb0002 uint32
+ zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Props")
+ return
+ }
+ if z.Props == nil {
+ z.Props = make(StringMap, zb0002)
+ } else if len(z.Props) > 0 {
+ for key := range z.Props {
+ delete(z.Props, key)
+ }
+ }
+ for zb0002 > 0 {
+ var za0001 string
+ var za0002 string
+ zb0002--
+ za0001, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Props")
+ return
+ }
+ za0002, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Props", za0001)
+ return
+ }
+ z.Props[za0001] = za0002
+ }
+ var zb0003 uint32
+ zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TeamMembers")
+ return
+ }
+ if cap(z.TeamMembers) >= int(zb0003) {
+ z.TeamMembers = (z.TeamMembers)[:zb0003]
+ } else {
+ z.TeamMembers = make([]*TeamMember, zb0003)
+ }
+ for za0003 := range z.TeamMembers {
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ z.TeamMembers[za0003] = nil
+ } else {
+ if z.TeamMembers[za0003] == nil {
+ z.TeamMembers[za0003] = new(TeamMember)
+ }
+ bts, err = z.TeamMembers[za0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TeamMembers", za0003)
+ return
+ }
+ }
+ }
+ z.Local, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Local")
+ return
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *Session) Msgsize() (s int) {
+ s = 1 + msgp.StringPrefixSize + len(z.Id) + msgp.StringPrefixSize + len(z.Token) + msgp.Int64Size + msgp.Int64Size + msgp.Int64Size + msgp.StringPrefixSize + len(z.UserId) + msgp.StringPrefixSize + len(z.DeviceId) + msgp.StringPrefixSize + len(z.Roles) + msgp.BoolSize + msgp.BoolSize + msgp.MapHeaderSize
+ if z.Props != nil {
+ for za0001, za0002 := range z.Props {
+ _ = za0002
+ s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002)
+ }
+ }
+ s += msgp.ArrayHeaderSize
+ for za0003 := range z.TeamMembers {
+ if z.TeamMembers[za0003] == nil {
+ s += msgp.NilSize
+ } else {
+ s += z.TeamMembers[za0003].Msgsize()
+ }
+ }
+ s += msgp.BoolSize
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *StringMap) DecodeMsg(dc *msgp.Reader) (err error) {
+ var zb0003 uint32
+ zb0003, err = dc.ReadMapHeader()
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if (*z) == nil {
+ (*z) = make(StringMap, zb0003)
+ } else if len((*z)) > 0 {
+ for key := range *z {
+ delete((*z), key)
+ }
+ }
+ for zb0003 > 0 {
+ zb0003--
+ var zb0001 string
+ var zb0002 string
+ zb0001, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ zb0002, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, zb0001)
+ return
+ }
+ (*z)[zb0001] = zb0002
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z StringMap) EncodeMsg(en *msgp.Writer) (err error) {
+ err = en.WriteMapHeader(uint32(len(z)))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ for zb0004, zb0005 := range z {
+ err = en.WriteString(zb0004)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ err = en.WriteString(zb0005)
+ if err != nil {
+ err = msgp.WrapError(err, zb0004)
+ return
+ }
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z StringMap) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ o = msgp.AppendMapHeader(o, uint32(len(z)))
+ for zb0004, zb0005 := range z {
+ o = msgp.AppendString(o, zb0004)
+ o = msgp.AppendString(o, zb0005)
+ }
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *StringMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var zb0003 uint32
+ zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if (*z) == nil {
+ (*z) = make(StringMap, zb0003)
+ } else if len((*z)) > 0 {
+ for key := range *z {
+ delete((*z), key)
+ }
+ }
+ for zb0003 > 0 {
+ var zb0001 string
+ var zb0002 string
+ zb0003--
+ zb0001, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ zb0002, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, zb0001)
+ return
+ }
+ (*z)[zb0001] = zb0002
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z StringMap) Msgsize() (s int) {
+ s = msgp.MapHeaderSize
+ if z != nil {
+ for zb0004, zb0005 := range z {
+ _ = zb0005
+ s += msgp.StringPrefixSize + len(zb0004) + msgp.StringPrefixSize + len(zb0005)
+ }
+ }
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *TeamMember) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zb0001 uint32
+ zb0001, err = dc.ReadMapHeader()
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "TeamId":
+ z.TeamId, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "TeamId")
+ return
+ }
+ case "UserId":
+ z.UserId, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "UserId")
+ return
+ }
+ case "Roles":
+ z.Roles, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "Roles")
+ return
+ }
+ case "DeleteAt":
+ z.DeleteAt, err = dc.ReadInt64()
+ if err != nil {
+ err = msgp.WrapError(err, "DeleteAt")
+ return
+ }
+ case "SchemeGuest":
+ z.SchemeGuest, err = dc.ReadBool()
+ if err != nil {
+ err = msgp.WrapError(err, "SchemeGuest")
+ return
+ }
+ case "SchemeUser":
+ z.SchemeUser, err = dc.ReadBool()
+ if err != nil {
+ err = msgp.WrapError(err, "SchemeUser")
+ return
+ }
+ case "SchemeAdmin":
+ z.SchemeAdmin, err = dc.ReadBool()
+ if err != nil {
+ err = msgp.WrapError(err, "SchemeAdmin")
+ return
+ }
+ case "ExplicitRoles":
+ z.ExplicitRoles, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "ExplicitRoles")
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *TeamMember) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 8
+ // write "TeamId"
+ err = en.Append(0x88, 0xa6, 0x54, 0x65, 0x61, 0x6d, 0x49, 0x64)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.TeamId)
+ if err != nil {
+ err = msgp.WrapError(err, "TeamId")
+ return
+ }
+ // write "UserId"
+ err = en.Append(0xa6, 0x55, 0x73, 0x65, 0x72, 0x49, 0x64)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.UserId)
+ if err != nil {
+ err = msgp.WrapError(err, "UserId")
+ return
+ }
+ // write "Roles"
+ err = en.Append(0xa5, 0x52, 0x6f, 0x6c, 0x65, 0x73)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.Roles)
+ if err != nil {
+ err = msgp.WrapError(err, "Roles")
+ return
+ }
+ // write "DeleteAt"
+ err = en.Append(0xa8, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x74)
+ if err != nil {
+ return
+ }
+ err = en.WriteInt64(z.DeleteAt)
+ if err != nil {
+ err = msgp.WrapError(err, "DeleteAt")
+ return
+ }
+ // write "SchemeGuest"
+ err = en.Append(0xab, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x47, 0x75, 0x65, 0x73, 0x74)
+ if err != nil {
+ return
+ }
+ err = en.WriteBool(z.SchemeGuest)
+ if err != nil {
+ err = msgp.WrapError(err, "SchemeGuest")
+ return
+ }
+ // write "SchemeUser"
+ err = en.Append(0xaa, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72)
+ if err != nil {
+ return
+ }
+ err = en.WriteBool(z.SchemeUser)
+ if err != nil {
+ err = msgp.WrapError(err, "SchemeUser")
+ return
+ }
+ // write "SchemeAdmin"
+ err = en.Append(0xab, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e)
+ if err != nil {
+ return
+ }
+ err = en.WriteBool(z.SchemeAdmin)
+ if err != nil {
+ err = msgp.WrapError(err, "SchemeAdmin")
+ return
+ }
+ // write "ExplicitRoles"
+ err = en.Append(0xad, 0x45, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x52, 0x6f, 0x6c, 0x65, 0x73)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.ExplicitRoles)
+ if err != nil {
+ err = msgp.WrapError(err, "ExplicitRoles")
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *TeamMember) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 8
+ // string "TeamId"
+ o = append(o, 0x88, 0xa6, 0x54, 0x65, 0x61, 0x6d, 0x49, 0x64)
+ o = msgp.AppendString(o, z.TeamId)
+ // string "UserId"
+ o = append(o, 0xa6, 0x55, 0x73, 0x65, 0x72, 0x49, 0x64)
+ o = msgp.AppendString(o, z.UserId)
+ // string "Roles"
+ o = append(o, 0xa5, 0x52, 0x6f, 0x6c, 0x65, 0x73)
+ o = msgp.AppendString(o, z.Roles)
+ // string "DeleteAt"
+ o = append(o, 0xa8, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x74)
+ o = msgp.AppendInt64(o, z.DeleteAt)
+ // string "SchemeGuest"
+ o = append(o, 0xab, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x47, 0x75, 0x65, 0x73, 0x74)
+ o = msgp.AppendBool(o, z.SchemeGuest)
+ // string "SchemeUser"
+ o = append(o, 0xaa, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72)
+ o = msgp.AppendBool(o, z.SchemeUser)
+ // string "SchemeAdmin"
+ o = append(o, 0xab, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e)
+ o = msgp.AppendBool(o, z.SchemeAdmin)
+ // string "ExplicitRoles"
+ o = append(o, 0xad, 0x45, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x52, 0x6f, 0x6c, 0x65, 0x73)
+ o = msgp.AppendString(o, z.ExplicitRoles)
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *TeamMember) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 uint32
+ zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "TeamId":
+ z.TeamId, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TeamId")
+ return
+ }
+ case "UserId":
+ z.UserId, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "UserId")
+ return
+ }
+ case "Roles":
+ z.Roles, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Roles")
+ return
+ }
+ case "DeleteAt":
+ z.DeleteAt, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "DeleteAt")
+ return
+ }
+ case "SchemeGuest":
+ z.SchemeGuest, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "SchemeGuest")
+ return
+ }
+ case "SchemeUser":
+ z.SchemeUser, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "SchemeUser")
+ return
+ }
+ case "SchemeAdmin":
+ z.SchemeAdmin, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "SchemeAdmin")
+ return
+ }
+ case "ExplicitRoles":
+ z.ExplicitRoles, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExplicitRoles")
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *TeamMember) Msgsize() (s int) {
+ s = 1 + 7 + msgp.StringPrefixSize + len(z.TeamId) + 7 + msgp.StringPrefixSize + len(z.UserId) + 6 + msgp.StringPrefixSize + len(z.Roles) + 9 + msgp.Int64Size + 12 + msgp.BoolSize + 11 + msgp.BoolSize + 12 + msgp.BoolSize + 14 + msgp.StringPrefixSize + len(z.ExplicitRoles)
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *User) DecodeMsg(dc *msgp.Reader) (err error) {
+ var zb0001 uint32
+ zb0001, err = dc.ReadArrayHeader()
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 != 31 {
+ err = msgp.ArrayError{Wanted: 31, Got: zb0001}
+ return
+ }
+ z.Id, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "Id")
+ return
+ }
+ z.CreateAt, err = dc.ReadInt64()
+ if err != nil {
+ err = msgp.WrapError(err, "CreateAt")
+ return
+ }
+ z.UpdateAt, err = dc.ReadInt64()
+ if err != nil {
+ err = msgp.WrapError(err, "UpdateAt")
+ return
+ }
+ z.DeleteAt, err = dc.ReadInt64()
+ if err != nil {
+ err = msgp.WrapError(err, "DeleteAt")
+ return
+ }
+ z.Username, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "Username")
+ return
+ }
+ z.Password, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "Password")
+ return
+ }
+ if dc.IsNil() {
+ err = dc.ReadNil()
+ if err != nil {
+ err = msgp.WrapError(err, "AuthData")
+ return
+ }
+ z.AuthData = nil
+ } else {
+ if z.AuthData == nil {
+ z.AuthData = new(string)
+ }
+ *z.AuthData, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "AuthData")
+ return
+ }
+ }
+ z.AuthService, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "AuthService")
+ return
+ }
+ z.Email, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "Email")
+ return
+ }
+ z.EmailVerified, err = dc.ReadBool()
+ if err != nil {
+ err = msgp.WrapError(err, "EmailVerified")
+ return
+ }
+ z.Nickname, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "Nickname")
+ return
+ }
+ z.FirstName, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "FirstName")
+ return
+ }
+ z.LastName, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "LastName")
+ return
+ }
+ z.Position, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "Position")
+ return
+ }
+ z.Roles, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "Roles")
+ return
+ }
+ z.AllowMarketing, err = dc.ReadBool()
+ if err != nil {
+ err = msgp.WrapError(err, "AllowMarketing")
+ return
+ }
+ var zb0002 uint32
+ zb0002, err = dc.ReadMapHeader()
+ if err != nil {
+ err = msgp.WrapError(err, "Props")
+ return
+ }
+ if z.Props == nil {
+ z.Props = make(StringMap, zb0002)
+ } else if len(z.Props) > 0 {
+ for key := range z.Props {
+ delete(z.Props, key)
+ }
+ }
+ for zb0002 > 0 {
+ zb0002--
+ var za0001 string
+ var za0002 string
+ za0001, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "Props")
+ return
+ }
+ za0002, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "Props", za0001)
+ return
+ }
+ z.Props[za0001] = za0002
+ }
+ var zb0003 uint32
+ zb0003, err = dc.ReadMapHeader()
+ if err != nil {
+ err = msgp.WrapError(err, "NotifyProps")
+ return
+ }
+ if z.NotifyProps == nil {
+ z.NotifyProps = make(StringMap, zb0003)
+ } else if len(z.NotifyProps) > 0 {
+ for key := range z.NotifyProps {
+ delete(z.NotifyProps, key)
+ }
+ }
+ for zb0003 > 0 {
+ zb0003--
+ var za0003 string
+ var za0004 string
+ za0003, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "NotifyProps")
+ return
+ }
+ za0004, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "NotifyProps", za0003)
+ return
+ }
+ z.NotifyProps[za0003] = za0004
+ }
+ z.LastPasswordUpdate, err = dc.ReadInt64()
+ if err != nil {
+ err = msgp.WrapError(err, "LastPasswordUpdate")
+ return
+ }
+ z.LastPictureUpdate, err = dc.ReadInt64()
+ if err != nil {
+ err = msgp.WrapError(err, "LastPictureUpdate")
+ return
+ }
+ z.FailedAttempts, err = dc.ReadInt()
+ if err != nil {
+ err = msgp.WrapError(err, "FailedAttempts")
+ return
+ }
+ z.Locale, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "Locale")
+ return
+ }
+ var zb0004 uint32
+ zb0004, err = dc.ReadMapHeader()
+ if err != nil {
+ err = msgp.WrapError(err, "Timezone")
+ return
+ }
+ if z.Timezone == nil {
+ z.Timezone = make(StringMap, zb0004)
+ } else if len(z.Timezone) > 0 {
+ for key := range z.Timezone {
+ delete(z.Timezone, key)
+ }
+ }
+ for zb0004 > 0 {
+ zb0004--
+ var za0005 string
+ var za0006 string
+ za0005, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "Timezone")
+ return
+ }
+ za0006, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "Timezone", za0005)
+ return
+ }
+ z.Timezone[za0005] = za0006
+ }
+ z.MfaActive, err = dc.ReadBool()
+ if err != nil {
+ err = msgp.WrapError(err, "MfaActive")
+ return
+ }
+ z.MfaSecret, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "MfaSecret")
+ return
+ }
+ z.LastActivityAt, err = dc.ReadInt64()
+ if err != nil {
+ err = msgp.WrapError(err, "LastActivityAt")
+ return
+ }
+ z.IsBot, err = dc.ReadBool()
+ if err != nil {
+ err = msgp.WrapError(err, "IsBot")
+ return
+ }
+ z.BotDescription, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "BotDescription")
+ return
+ }
+ z.BotLastIconUpdate, err = dc.ReadInt64()
+ if err != nil {
+ err = msgp.WrapError(err, "BotLastIconUpdate")
+ return
+ }
+ z.TermsOfServiceId, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "TermsOfServiceId")
+ return
+ }
+ z.TermsOfServiceCreateAt, err = dc.ReadInt64()
+ if err != nil {
+ err = msgp.WrapError(err, "TermsOfServiceCreateAt")
+ return
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *User) EncodeMsg(en *msgp.Writer) (err error) {
+ // array header, size 31
+ err = en.Append(0xdc, 0x0, 0x1f)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.Id)
+ if err != nil {
+ err = msgp.WrapError(err, "Id")
+ return
+ }
+ err = en.WriteInt64(z.CreateAt)
+ if err != nil {
+ err = msgp.WrapError(err, "CreateAt")
+ return
+ }
+ err = en.WriteInt64(z.UpdateAt)
+ if err != nil {
+ err = msgp.WrapError(err, "UpdateAt")
+ return
+ }
+ err = en.WriteInt64(z.DeleteAt)
+ if err != nil {
+ err = msgp.WrapError(err, "DeleteAt")
+ return
+ }
+ err = en.WriteString(z.Username)
+ if err != nil {
+ err = msgp.WrapError(err, "Username")
+ return
+ }
+ err = en.WriteString(z.Password)
+ if err != nil {
+ err = msgp.WrapError(err, "Password")
+ return
+ }
+ if z.AuthData == nil {
+ err = en.WriteNil()
+ if err != nil {
+ return
+ }
+ } else {
+ err = en.WriteString(*z.AuthData)
+ if err != nil {
+ err = msgp.WrapError(err, "AuthData")
+ return
+ }
+ }
+ err = en.WriteString(z.AuthService)
+ if err != nil {
+ err = msgp.WrapError(err, "AuthService")
+ return
+ }
+ err = en.WriteString(z.Email)
+ if err != nil {
+ err = msgp.WrapError(err, "Email")
+ return
+ }
+ err = en.WriteBool(z.EmailVerified)
+ if err != nil {
+ err = msgp.WrapError(err, "EmailVerified")
+ return
+ }
+ err = en.WriteString(z.Nickname)
+ if err != nil {
+ err = msgp.WrapError(err, "Nickname")
+ return
+ }
+ err = en.WriteString(z.FirstName)
+ if err != nil {
+ err = msgp.WrapError(err, "FirstName")
+ return
+ }
+ err = en.WriteString(z.LastName)
+ if err != nil {
+ err = msgp.WrapError(err, "LastName")
+ return
+ }
+ err = en.WriteString(z.Position)
+ if err != nil {
+ err = msgp.WrapError(err, "Position")
+ return
+ }
+ err = en.WriteString(z.Roles)
+ if err != nil {
+ err = msgp.WrapError(err, "Roles")
+ return
+ }
+ err = en.WriteBool(z.AllowMarketing)
+ if err != nil {
+ err = msgp.WrapError(err, "AllowMarketing")
+ return
+ }
+ err = en.WriteMapHeader(uint32(len(z.Props)))
+ if err != nil {
+ err = msgp.WrapError(err, "Props")
+ return
+ }
+ for za0001, za0002 := range z.Props {
+ err = en.WriteString(za0001)
+ if err != nil {
+ err = msgp.WrapError(err, "Props")
+ return
+ }
+ err = en.WriteString(za0002)
+ if err != nil {
+ err = msgp.WrapError(err, "Props", za0001)
+ return
+ }
+ }
+ err = en.WriteMapHeader(uint32(len(z.NotifyProps)))
+ if err != nil {
+ err = msgp.WrapError(err, "NotifyProps")
+ return
+ }
+ for za0003, za0004 := range z.NotifyProps {
+ err = en.WriteString(za0003)
+ if err != nil {
+ err = msgp.WrapError(err, "NotifyProps")
+ return
+ }
+ err = en.WriteString(za0004)
+ if err != nil {
+ err = msgp.WrapError(err, "NotifyProps", za0003)
+ return
+ }
+ }
+ err = en.WriteInt64(z.LastPasswordUpdate)
+ if err != nil {
+ err = msgp.WrapError(err, "LastPasswordUpdate")
+ return
+ }
+ err = en.WriteInt64(z.LastPictureUpdate)
+ if err != nil {
+ err = msgp.WrapError(err, "LastPictureUpdate")
+ return
+ }
+ err = en.WriteInt(z.FailedAttempts)
+ if err != nil {
+ err = msgp.WrapError(err, "FailedAttempts")
+ return
+ }
+ err = en.WriteString(z.Locale)
+ if err != nil {
+ err = msgp.WrapError(err, "Locale")
+ return
+ }
+ err = en.WriteMapHeader(uint32(len(z.Timezone)))
+ if err != nil {
+ err = msgp.WrapError(err, "Timezone")
+ return
+ }
+ for za0005, za0006 := range z.Timezone {
+ err = en.WriteString(za0005)
+ if err != nil {
+ err = msgp.WrapError(err, "Timezone")
+ return
+ }
+ err = en.WriteString(za0006)
+ if err != nil {
+ err = msgp.WrapError(err, "Timezone", za0005)
+ return
+ }
+ }
+ err = en.WriteBool(z.MfaActive)
+ if err != nil {
+ err = msgp.WrapError(err, "MfaActive")
+ return
+ }
+ err = en.WriteString(z.MfaSecret)
+ if err != nil {
+ err = msgp.WrapError(err, "MfaSecret")
+ return
+ }
+ err = en.WriteInt64(z.LastActivityAt)
+ if err != nil {
+ err = msgp.WrapError(err, "LastActivityAt")
+ return
+ }
+ err = en.WriteBool(z.IsBot)
+ if err != nil {
+ err = msgp.WrapError(err, "IsBot")
+ return
+ }
+ err = en.WriteString(z.BotDescription)
+ if err != nil {
+ err = msgp.WrapError(err, "BotDescription")
+ return
+ }
+ err = en.WriteInt64(z.BotLastIconUpdate)
+ if err != nil {
+ err = msgp.WrapError(err, "BotLastIconUpdate")
+ return
+ }
+ err = en.WriteString(z.TermsOfServiceId)
+ if err != nil {
+ err = msgp.WrapError(err, "TermsOfServiceId")
+ return
+ }
+ err = en.WriteInt64(z.TermsOfServiceCreateAt)
+ if err != nil {
+ err = msgp.WrapError(err, "TermsOfServiceCreateAt")
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *User) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // array header, size 31
+ o = append(o, 0xdc, 0x0, 0x1f)
+ o = msgp.AppendString(o, z.Id)
+ o = msgp.AppendInt64(o, z.CreateAt)
+ o = msgp.AppendInt64(o, z.UpdateAt)
+ o = msgp.AppendInt64(o, z.DeleteAt)
+ o = msgp.AppendString(o, z.Username)
+ o = msgp.AppendString(o, z.Password)
+ if z.AuthData == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendString(o, *z.AuthData)
+ }
+ o = msgp.AppendString(o, z.AuthService)
+ o = msgp.AppendString(o, z.Email)
+ o = msgp.AppendBool(o, z.EmailVerified)
+ o = msgp.AppendString(o, z.Nickname)
+ o = msgp.AppendString(o, z.FirstName)
+ o = msgp.AppendString(o, z.LastName)
+ o = msgp.AppendString(o, z.Position)
+ o = msgp.AppendString(o, z.Roles)
+ o = msgp.AppendBool(o, z.AllowMarketing)
+ o = msgp.AppendMapHeader(o, uint32(len(z.Props)))
+ for za0001, za0002 := range z.Props {
+ o = msgp.AppendString(o, za0001)
+ o = msgp.AppendString(o, za0002)
+ }
+ o = msgp.AppendMapHeader(o, uint32(len(z.NotifyProps)))
+ for za0003, za0004 := range z.NotifyProps {
+ o = msgp.AppendString(o, za0003)
+ o = msgp.AppendString(o, za0004)
+ }
+ o = msgp.AppendInt64(o, z.LastPasswordUpdate)
+ o = msgp.AppendInt64(o, z.LastPictureUpdate)
+ o = msgp.AppendInt(o, z.FailedAttempts)
+ o = msgp.AppendString(o, z.Locale)
+ o = msgp.AppendMapHeader(o, uint32(len(z.Timezone)))
+ for za0005, za0006 := range z.Timezone {
+ o = msgp.AppendString(o, za0005)
+ o = msgp.AppendString(o, za0006)
+ }
+ o = msgp.AppendBool(o, z.MfaActive)
+ o = msgp.AppendString(o, z.MfaSecret)
+ o = msgp.AppendInt64(o, z.LastActivityAt)
+ o = msgp.AppendBool(o, z.IsBot)
+ o = msgp.AppendString(o, z.BotDescription)
+ o = msgp.AppendInt64(o, z.BotLastIconUpdate)
+ o = msgp.AppendString(o, z.TermsOfServiceId)
+ o = msgp.AppendInt64(o, z.TermsOfServiceCreateAt)
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *User) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var zb0001 uint32
+ zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 != 31 {
+ err = msgp.ArrayError{Wanted: 31, Got: zb0001}
+ return
+ }
+ z.Id, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Id")
+ return
+ }
+ z.CreateAt, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "CreateAt")
+ return
+ }
+ z.UpdateAt, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "UpdateAt")
+ return
+ }
+ z.DeleteAt, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "DeleteAt")
+ return
+ }
+ z.Username, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Username")
+ return
+ }
+ z.Password, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Password")
+ return
+ }
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ z.AuthData = nil
+ } else {
+ if z.AuthData == nil {
+ z.AuthData = new(string)
+ }
+ *z.AuthData, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "AuthData")
+ return
+ }
+ }
+ z.AuthService, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "AuthService")
+ return
+ }
+ z.Email, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Email")
+ return
+ }
+ z.EmailVerified, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "EmailVerified")
+ return
+ }
+ z.Nickname, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Nickname")
+ return
+ }
+ z.FirstName, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "FirstName")
+ return
+ }
+ z.LastName, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "LastName")
+ return
+ }
+ z.Position, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Position")
+ return
+ }
+ z.Roles, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Roles")
+ return
+ }
+ z.AllowMarketing, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "AllowMarketing")
+ return
+ }
+ var zb0002 uint32
+ zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Props")
+ return
+ }
+ if z.Props == nil {
+ z.Props = make(StringMap, zb0002)
+ } else if len(z.Props) > 0 {
+ for key := range z.Props {
+ delete(z.Props, key)
+ }
+ }
+ for zb0002 > 0 {
+ var za0001 string
+ var za0002 string
+ zb0002--
+ za0001, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Props")
+ return
+ }
+ za0002, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Props", za0001)
+ return
+ }
+ z.Props[za0001] = za0002
+ }
+ var zb0003 uint32
+ zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "NotifyProps")
+ return
+ }
+ if z.NotifyProps == nil {
+ z.NotifyProps = make(StringMap, zb0003)
+ } else if len(z.NotifyProps) > 0 {
+ for key := range z.NotifyProps {
+ delete(z.NotifyProps, key)
+ }
+ }
+ for zb0003 > 0 {
+ var za0003 string
+ var za0004 string
+ zb0003--
+ za0003, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "NotifyProps")
+ return
+ }
+ za0004, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "NotifyProps", za0003)
+ return
+ }
+ z.NotifyProps[za0003] = za0004
+ }
+ z.LastPasswordUpdate, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "LastPasswordUpdate")
+ return
+ }
+ z.LastPictureUpdate, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "LastPictureUpdate")
+ return
+ }
+ z.FailedAttempts, bts, err = msgp.ReadIntBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "FailedAttempts")
+ return
+ }
+ z.Locale, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Locale")
+ return
+ }
+ var zb0004 uint32
+ zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Timezone")
+ return
+ }
+ if z.Timezone == nil {
+ z.Timezone = make(StringMap, zb0004)
+ } else if len(z.Timezone) > 0 {
+ for key := range z.Timezone {
+ delete(z.Timezone, key)
+ }
+ }
+ for zb0004 > 0 {
+ var za0005 string
+ var za0006 string
+ zb0004--
+ za0005, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Timezone")
+ return
+ }
+ za0006, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Timezone", za0005)
+ return
+ }
+ z.Timezone[za0005] = za0006
+ }
+ z.MfaActive, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "MfaActive")
+ return
+ }
+ z.MfaSecret, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "MfaSecret")
+ return
+ }
+ z.LastActivityAt, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "LastActivityAt")
+ return
+ }
+ z.IsBot, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "IsBot")
+ return
+ }
+ z.BotDescription, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "BotDescription")
+ return
+ }
+ z.BotLastIconUpdate, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "BotLastIconUpdate")
+ return
+ }
+ z.TermsOfServiceId, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TermsOfServiceId")
+ return
+ }
+ z.TermsOfServiceCreateAt, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TermsOfServiceCreateAt")
+ return
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *User) Msgsize() (s int) {
+ s = 3 + msgp.StringPrefixSize + len(z.Id) + msgp.Int64Size + msgp.Int64Size + msgp.Int64Size + msgp.StringPrefixSize + len(z.Username) + msgp.StringPrefixSize + len(z.Password)
+ if z.AuthData == nil {
+ s += msgp.NilSize
+ } else {
+ s += msgp.StringPrefixSize + len(*z.AuthData)
+ }
+ s += msgp.StringPrefixSize + len(z.AuthService) + msgp.StringPrefixSize + len(z.Email) + msgp.BoolSize + msgp.StringPrefixSize + len(z.Nickname) + msgp.StringPrefixSize + len(z.FirstName) + msgp.StringPrefixSize + len(z.LastName) + msgp.StringPrefixSize + len(z.Position) + msgp.StringPrefixSize + len(z.Roles) + msgp.BoolSize + msgp.MapHeaderSize
+ if z.Props != nil {
+ for za0001, za0002 := range z.Props {
+ _ = za0002
+ s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002)
+ }
+ }
+ s += msgp.MapHeaderSize
+ if z.NotifyProps != nil {
+ for za0003, za0004 := range z.NotifyProps {
+ _ = za0004
+ s += msgp.StringPrefixSize + len(za0003) + msgp.StringPrefixSize + len(za0004)
+ }
+ }
+ s += msgp.Int64Size + msgp.Int64Size + msgp.IntSize + msgp.StringPrefixSize + len(z.Locale) + msgp.MapHeaderSize
+ if z.Timezone != nil {
+ for za0005, za0006 := range z.Timezone {
+ _ = za0006
+ s += msgp.StringPrefixSize + len(za0005) + msgp.StringPrefixSize + len(za0006)
+ }
+ }
+ s += msgp.BoolSize + msgp.StringPrefixSize + len(z.MfaSecret) + msgp.Int64Size + msgp.BoolSize + msgp.StringPrefixSize + len(z.BotDescription) + msgp.Int64Size + msgp.StringPrefixSize + len(z.TermsOfServiceId) + msgp.Int64Size
+ return
+}
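
For illustration only (not part of the upstream change): the generated MarshalMsg/UnmarshalMsg methods above are the public surface of this file, so a minimal round-trip sketch looks like the following; the field values are invented.

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	s := model.Session{Id: "sessionid", UserId: "userid", Roles: "system_user"}

	// MarshalMsg appends the msgpack-encoded Session to the supplied buffer (nil here).
	buf, err := s.MarshalMsg(nil)
	if err != nil {
		panic(err)
	}

	// UnmarshalMsg consumes the Session and returns any leftover bytes.
	var decoded model.Session
	if _, err := decoded.UnmarshalMsg(buf); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Id == s.Id, decoded.Roles == s.Roles) // true true
}
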
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/session.go b/vendor/github.com/mattermost/mattermost-server/v5/model/session.go
index b5567a65..976e1229 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/session.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/session.go
@@ -30,6 +30,11 @@ const (
SESSION_USER_ACCESS_TOKEN_EXPIRY = 100 * 365 // 100 years
)
+//msgp:tuple Session
+
+// Session contains the user session details.
+// This struct's serializer methods are auto-generated. If a new field is added/removed,
+// please run make gen-serialized.
type Session struct {
Id string `json:"id"`
Token string `json:"token"`
@@ -40,6 +45,7 @@ type Session struct {
DeviceId string `json:"device_id"`
Roles string `json:"roles"`
IsOAuth bool `json:"is_oauth"`
+ ExpiredNotify bool `json:"expired_notify"`
Props StringMap `json:"props"`
TeamMembers []*TeamMember `json:"team_members" db:"-"`
Local bool `json:"local" db:"-"`
@@ -114,6 +120,9 @@ func (me *Session) IsExpired() bool {
return false
}
+// Deprecated: SetExpireInDays is deprecated and should not be used.
+// Use (*App).SetSessionExpireInDays instead which handles the
+// cases where the new ExpiresAt is not relative to CreateAt.
func (me *Session) SetExpireInDays(days int) {
if me.CreateAt == 0 {
me.ExpiresAt = GetMillis() + (1000 * 60 * 60 * 24 * int64(days))
@@ -171,8 +180,21 @@ func (me *Session) IsSaml() bool {
return isSaml
}
+func (me *Session) IsOAuthUser() bool {
+ val, ok := me.Props[USER_AUTH_SERVICE_IS_OAUTH]
+ if !ok {
+ return false
+ }
+ isOAuthUser, err := strconv.ParseBool(val)
+ if err != nil {
+ mlog.Error("Error parsing boolean property from Session", mlog.Err(err))
+ return false
+ }
+ return isOAuthUser
+}
+
func (me *Session) IsSSOLogin() bool {
- return me.IsOAuth || me.IsSaml()
+ return me.IsOAuthUser() || me.IsSaml()
}
func (me *Session) GetUserRoles() []string {
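
For illustration only (not part of the upstream change): a sketch of the new IsOAuthUser/IsSSOLogin behaviour, driven purely by the session props shown above; the prop value is invented.

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	// The OAuth flag is now read from Props rather than from the IsOAuth field.
	s := model.Session{Props: model.StringMap{
		model.USER_AUTH_SERVICE_IS_OAUTH: "true",
	}}

	fmt.Println(s.IsOAuthUser()) // true
	fmt.Println(s.IsSSOLogin())  // true: IsOAuthUser() || IsSaml()
}
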
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/status.go b/vendor/github.com/mattermost/mattermost-server/v5/model/status.go
index 741fa1ed..1f32422a 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/status.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/status.go
@@ -35,7 +35,8 @@ func (o *Status) ToJson() string {
}
func (o *Status) ToClusterJson() string {
- b, _ := json.Marshal(o)
+ oCopy := *o
+ b, _ := json.Marshal(oCopy)
return string(b)
}
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/system.go b/vendor/github.com/mattermost/mattermost-server/v5/model/system.go
index 4c3132e2..f826276f 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/system.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/system.go
@@ -10,15 +10,37 @@ import (
)
const (
- SYSTEM_DIAGNOSTIC_ID = "DiagnosticId"
- SYSTEM_RAN_UNIT_TESTS = "RanUnitTests"
- SYSTEM_LAST_SECURITY_TIME = "LastSecurityTime"
- SYSTEM_ACTIVE_LICENSE_ID = "ActiveLicenseId"
- SYSTEM_LAST_COMPLIANCE_TIME = "LastComplianceTime"
- SYSTEM_ASYMMETRIC_SIGNING_KEY = "AsymmetricSigningKey"
- SYSTEM_POST_ACTION_COOKIE_SECRET = "PostActionCookieSecret"
- SYSTEM_INSTALLATION_DATE_KEY = "InstallationDate"
- SYSTEM_FIRST_SERVER_RUN_TIMESTAMP_KEY = "FirstServerRunTimestamp"
+ SYSTEM_TELEMETRY_ID = "DiagnosticId"
+ SYSTEM_RAN_UNIT_TESTS = "RanUnitTests"
+ SYSTEM_LAST_SECURITY_TIME = "LastSecurityTime"
+ SYSTEM_ACTIVE_LICENSE_ID = "ActiveLicenseId"
+ SYSTEM_LAST_COMPLIANCE_TIME = "LastComplianceTime"
+ SYSTEM_ASYMMETRIC_SIGNING_KEY = "AsymmetricSigningKey"
+ SYSTEM_POST_ACTION_COOKIE_SECRET = "PostActionCookieSecret"
+ SYSTEM_INSTALLATION_DATE_KEY = "InstallationDate"
+ SYSTEM_FIRST_SERVER_RUN_TIMESTAMP_KEY = "FirstServerRunTimestamp"
+ SYSTEM_CLUSTER_ENCRYPTION_KEY = "ClusterEncryptionKey"
+ SYSTEM_UPGRADED_FROM_TE_ID = "UpgradedFromTE"
+ SYSTEM_WARN_METRIC_NUMBER_OF_TEAMS_5 = "warn_metric_number_of_teams_5"
+ SYSTEM_WARN_METRIC_NUMBER_OF_CHANNELS_50 = "warn_metric_number_of_channels_50"
+ SYSTEM_WARN_METRIC_MFA = "warn_metric_mfa"
+ SYSTEM_WARN_METRIC_EMAIL_DOMAIN = "warn_metric_email_domain"
+ SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_100 = "warn_metric_number_of_active_users_100"
+ SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_200 = "warn_metric_number_of_active_users_200"
+ SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_300 = "warn_metric_number_of_active_users_300"
+ SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_500 = "warn_metric_number_of_active_users_500"
+ SYSTEM_WARN_METRIC_NUMBER_OF_POSTS_2M = "warn_metric_number_of_posts_2M"
+ SYSTEM_WARN_METRIC_LAST_RUN_TIMESTAMP_KEY = "LastWarnMetricRunTimestamp"
+)
+
+const (
+ WARN_METRIC_STATUS_LIMIT_REACHED = "true"
+ WARN_METRIC_STATUS_RUNONCE = "runonce"
+ WARN_METRIC_STATUS_ACK = "ack"
+ WARN_METRIC_STATUS_STORE_PREFIX = "warn_metric_"
+ WARN_METRIC_JOB_INTERVAL = 24 * 7
+ WARN_METRIC_NUMBER_OF_ACTIVE_USERS_25 = 25
+ WARN_METRIC_JOB_WAIT_TIME = 1000 * 3600 * 24 * 7 // 7 days
)
type System struct {
@@ -69,3 +91,114 @@ func ServerBusyStateFromJson(r io.Reader) *ServerBusyState {
json.NewDecoder(r).Decode(&sbs)
return sbs
}
+
+var WarnMetricsTable = map[string]WarnMetric{
+ SYSTEM_WARN_METRIC_MFA: {
+ Id: SYSTEM_WARN_METRIC_MFA,
+ Limit: -1,
+ IsBotOnly: true,
+ IsRunOnce: true,
+ },
+ SYSTEM_WARN_METRIC_EMAIL_DOMAIN: {
+ Id: SYSTEM_WARN_METRIC_EMAIL_DOMAIN,
+ Limit: -1,
+ IsBotOnly: true,
+ IsRunOnce: true,
+ },
+ SYSTEM_WARN_METRIC_NUMBER_OF_TEAMS_5: {
+ Id: SYSTEM_WARN_METRIC_NUMBER_OF_TEAMS_5,
+ Limit: 5,
+ IsBotOnly: true,
+ IsRunOnce: true,
+ },
+ SYSTEM_WARN_METRIC_NUMBER_OF_CHANNELS_50: {
+ Id: SYSTEM_WARN_METRIC_NUMBER_OF_CHANNELS_50,
+ Limit: 50,
+ IsBotOnly: true,
+ IsRunOnce: true,
+ },
+ SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_100: {
+ Id: SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_100,
+ Limit: 100,
+ IsBotOnly: true,
+ IsRunOnce: true,
+ },
+ SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_200: {
+ Id: SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_200,
+ Limit: 200,
+ IsBotOnly: true,
+ IsRunOnce: true,
+ },
+ SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_300: {
+ Id: SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_300,
+ Limit: 300,
+ IsBotOnly: true,
+ IsRunOnce: true,
+ },
+ SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_500: {
+ Id: SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_500,
+ Limit: 500,
+ IsBotOnly: false,
+ IsRunOnce: true,
+ },
+ SYSTEM_WARN_METRIC_NUMBER_OF_POSTS_2M: {
+ Id: SYSTEM_WARN_METRIC_NUMBER_OF_POSTS_2M,
+ Limit: 2000000,
+ IsBotOnly: false,
+ IsRunOnce: true,
+ },
+}
+
+type WarnMetric struct {
+ Id string
+ Limit int64
+ IsBotOnly bool
+ IsRunOnce bool
+}
+
+type WarnMetricDisplayTexts struct {
+ BotTitle string
+ BotMessageBody string
+ BotSuccessMessage string
+ EmailBody string
+}
+type WarnMetricStatus struct {
+ Id string `json:"id"`
+ Limit int64 `json:"limit"`
+ Acked bool `json:"acked"`
+ StoreStatus string `json:"store_status,omitempty"`
+}
+
+func (wms *WarnMetricStatus) ToJson() string {
+ b, _ := json.Marshal(wms)
+ return string(b)
+}
+
+func WarnMetricStatusFromJson(data io.Reader) *WarnMetricStatus {
+ var o WarnMetricStatus
+ if err := json.NewDecoder(data).Decode(&o); err != nil {
+ return nil
+ } else {
+ return &o
+ }
+}
+
+func MapWarnMetricStatusToJson(o map[string]*WarnMetricStatus) string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+type SendWarnMetricAck struct {
+ ForceAck bool `json:"forceAck"`
+}
+
+func (swma *SendWarnMetricAck) ToJson() string {
+ b, _ := json.Marshal(swma)
+ return string(b)
+}
+
+func SendWarnMetricAckFromJson(r io.Reader) *SendWarnMetricAck {
+ var swma *SendWarnMetricAck
+ json.NewDecoder(r).Decode(&swma)
+ return swma
+}
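
For illustration only (not part of the upstream change): a sketch of looking up an entry in the new WarnMetricsTable and serializing the corresponding status; the Acked value is invented.

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	metric := model.WarnMetricsTable[model.SYSTEM_WARN_METRIC_NUMBER_OF_ACTIVE_USERS_500]

	status := &model.WarnMetricStatus{
		Id:    metric.Id,
		Limit: metric.Limit,
		Acked: false,
	}
	// Prints {"id":"warn_metric_number_of_active_users_500","limit":500,"acked":false}
	fmt.Println(status.ToJson())
}
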
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/team_search.go b/vendor/github.com/mattermost/mattermost-server/v5/model/team_search.go
index b8b1fe30..f9de5801 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/team_search.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/team_search.go
@@ -9,9 +9,12 @@ import (
)
type TeamSearch struct {
- Term string `json:"term"`
- Page *int `json:"page,omitempty"`
- PerPage *int `json:"per_page,omitempty"`
+ Term string `json:"term"`
+ Page *int `json:"page,omitempty"`
+ PerPage *int `json:"per_page,omitempty"`
+ AllowOpenInvite *bool `json:"allow_open_invite,omitempty"`
+ GroupConstrained *bool `json:"group_constrained,omitempty"`
+ IncludeGroupConstrained *bool `json:"include_group_constrained,omitempty"`
}
func (t *TeamSearch) IsPaginated() bool {
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/typing_request.go b/vendor/github.com/mattermost/mattermost-server/v5/model/typing_request.go
new file mode 100644
index 00000000..e2e9d3bf
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/typing_request.go
@@ -0,0 +1,25 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+type TypingRequest struct {
+ ChannelId string `json:"channel_id"`
+ ParentId string `json:"parent_id"`
+}
+
+func (o *TypingRequest) ToJson() string {
+ b, _ := json.Marshal(o)
+ return string(b)
+}
+
+func TypingRequestFromJson(data io.Reader) *TypingRequest {
+ var o *TypingRequest
+ json.NewDecoder(data).Decode(&o)
+ return o
+}
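
For illustration only (not part of the upstream change): a round-trip sketch for the new TypingRequest helpers; the channel id is invented.

package main

import (
	"fmt"
	"strings"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	req := &model.TypingRequest{ChannelId: "channelid", ParentId: ""}

	payload := req.ToJson()
	decoded := model.TypingRequestFromJson(strings.NewReader(payload))
	fmt.Println(decoded.ChannelId == req.ChannelId) // true
}
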
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/upload_session.go b/vendor/github.com/mattermost/mattermost-server/v5/model/upload_session.go
new file mode 100644
index 00000000..663ee0b1
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/upload_session.go
@@ -0,0 +1,141 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+)
+
+// UploadType defines the type of an upload.
+type UploadType string
+
+const (
+ UploadTypeAttachment UploadType = "attachment"
+ UploadTypeImport UploadType = "import"
+)
+
+// UploadSession contains information used to keep track of a file upload.
+type UploadSession struct {
+ // The unique identifier for the session.
+ Id string `json:"id"`
+ // The type of the upload.
+ Type UploadType `json:"type"`
+ // The timestamp of creation.
+ CreateAt int64 `json:"create_at"`
+ // The id of the user performing the upload.
+ UserId string `json:"user_id"`
+ // The id of the channel to upload to.
+ ChannelId string `json:"channel_id"`
+ // The name of the file to upload.
+ Filename string `json:"filename"`
+ // The path where the file is stored.
+ Path string `json:"-"`
+ // The size of the file to upload.
+ FileSize int64 `json:"file_size"`
+ // The amount of received data in bytes. If equal to FileSize it means the
+ // upload has finished.
+ FileOffset int64 `json:"file_offset"`
+}
+
+// ToJson serializes the UploadSession into JSON and returns it as string.
+func (us *UploadSession) ToJson() string {
+ b, _ := json.Marshal(us)
+ return string(b)
+}
+
+// UploadSessionsToJson serializes a list of UploadSession into JSON and
+// returns it as string.
+func UploadSessionsToJson(uss []*UploadSession) string {
+ b, _ := json.Marshal(uss)
+ return string(b)
+}
+
+// UploadSessionsFromJson deserializes a list of UploadSession from JSON data.
+func UploadSessionsFromJson(data io.Reader) []*UploadSession {
+ decoder := json.NewDecoder(data)
+ var uss []*UploadSession
+ if err := decoder.Decode(&uss); err != nil {
+ return nil
+ }
+ return uss
+}
+
+// UploadSessionFromJson deserializes the UploadSession from JSON data.
+func UploadSessionFromJson(data io.Reader) *UploadSession {
+ decoder := json.NewDecoder(data)
+ var us UploadSession
+ if err := decoder.Decode(&us); err != nil {
+ return nil
+ }
+ return &us
+}
+
+// PreSave is a utility function used to fill required information.
+func (us *UploadSession) PreSave() {
+ if us.Id == "" {
+ us.Id = NewId()
+ }
+
+ if us.CreateAt == 0 {
+ us.CreateAt = GetMillis()
+ }
+}
+
+// IsValid validates an UploadType. It returns an error in case of
+// failure.
+func (t UploadType) IsValid() error {
+ switch t {
+ case UploadTypeAttachment:
+ return nil
+ case UploadTypeImport:
+ return nil
+ default:
+ }
+ return fmt.Errorf("invalid UploadType %s", t)
+}
+
+// IsValid validates an UploadSession. It returns an error in case of
+// failure.
+func (us *UploadSession) IsValid() *AppError {
+ if !IsValidId(us.Id) {
+ return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.id.app_error", nil, "", http.StatusBadRequest)
+ }
+
+ if err := us.Type.IsValid(); err != nil {
+ return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.type.app_error", nil, err.Error(), http.StatusBadRequest)
+ }
+
+ if !IsValidId(us.UserId) {
+ return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.user_id.app_error", nil, "id="+us.Id, http.StatusBadRequest)
+ }
+
+ if us.Type == UploadTypeAttachment && !IsValidId(us.ChannelId) {
+ return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.channel_id.app_error", nil, "id="+us.Id, http.StatusBadRequest)
+ }
+
+ if us.CreateAt == 0 {
+ return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.create_at.app_error", nil, "id="+us.Id, http.StatusBadRequest)
+ }
+
+ if us.Filename == "" {
+ return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.filename.app_error", nil, "id="+us.Id, http.StatusBadRequest)
+ }
+
+ if us.FileSize <= 0 {
+ return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.file_size.app_error", nil, "id="+us.Id, http.StatusBadRequest)
+ }
+
+ if us.FileOffset < 0 || us.FileOffset > us.FileSize {
+ return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.file_offset.app_error", nil, "id="+us.Id, http.StatusBadRequest)
+ }
+
+ if us.Path == "" {
+ return NewAppError("UploadSession.IsValid", "model.upload_session.is_valid.path.app_error", nil, "id="+us.Id, http.StatusBadRequest)
+ }
+
+ return nil
+}
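
For illustration only (not part of the upstream change): a sketch of preparing and validating an UploadSession with the helpers above. model.NewId is assumed to be the usual id generator from this package; the file name, size and path are invented.

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	us := &model.UploadSession{
		Type:      model.UploadTypeAttachment,
		UserId:    model.NewId(), // assumed helper from this package
		ChannelId: model.NewId(),
		Filename:  "report.pdf",
		FileSize:  1024,
		Path:      "uploads/report.pdf",
	}

	us.PreSave() // fills Id and CreateAt when they are unset

	if appErr := us.IsValid(); appErr != nil {
		fmt.Println("invalid upload session:", appErr.Error())
		return
	}
	fmt.Println("upload session ready:", us.ToJson())
}
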
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/user.go b/vendor/github.com/mattermost/mattermost-server/v5/model/user.go
index 168605ad..4e4d067c 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/user.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/user.go
@@ -59,6 +59,11 @@ const (
USER_LOCALE_MAX_LENGTH = 5
)
+//msgp:tuple User
+
+// User contains the details about the user.
+// This struct's serializer methods are auto-generated. If a new field is added/removed,
+// please run make gen-serialized.
type User struct {
Id string `json:"id"`
CreateAt int64 `json:"create_at,omitempty"`
@@ -124,6 +129,7 @@ type UserForIndexing struct {
Nickname string `json:"nickname"`
FirstName string `json:"first_name"`
LastName string `json:"last_name"`
+ Roles string `json:"roles"`
CreateAt int64 `json:"create_at"`
DeleteAt int64 `json:"delete_at"`
TeamsIds []string `json:"team_id"`
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/user_count.go b/vendor/github.com/mattermost/mattermost-server/v5/model/user_count.go
index 3c20b23a..ee474883 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/user_count.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/user_count.go
@@ -13,6 +13,14 @@ type UserCountOptions struct {
ExcludeRegularUsers bool
// Only include users on a specific team. "" for any team.
TeamId string
+ // Only include users on a specific channel. "" for any channel.
+ ChannelId string
// Restrict to search in a list of teams and channels
ViewRestrictions *ViewUsersRestrictions
+ // Only include users matching any of the given system wide roles.
+ Roles []string
+ // Only include users matching any of the given channel roles, must be used with ChannelId.
+ ChannelRoles []string
+ // Only include users matching any of the given team roles, must be used with TeamId.
+ TeamRoles []string
}
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/user_get.go b/vendor/github.com/mattermost/mattermost-server/v5/model/user_get.go
index f865d53c..2748d735 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/user_get.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/user_get.go
@@ -12,6 +12,8 @@ type UserGetOptions struct {
InChannelId string
// Filters the users not in the channel
NotInChannelId string
+ // Filters the users in the group
+ InGroupId string
// Filters the users group constrained
GroupConstrained bool
// Filters the users without a team
@@ -22,6 +24,12 @@ type UserGetOptions struct {
Active bool
// Filters for the given role
Role string
+ // Filters for users matching any of the given system wide roles
+ Roles []string
+ // Filters for users matching any of the given channel roles, must be used with InChannelId
+ ChannelRoles []string
+ // Filters for users matching any of the given team roles, must be used with InTeamId
+ TeamRoles []string
// Sorting option
Sort string
// Restrict to search in a list of teams and channels
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/user_search.go b/vendor/github.com/mattermost/mattermost-server/v5/model/user_search.go
index fa9fa8a2..0a721eac 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/user_search.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/user_search.go
@@ -13,16 +13,20 @@ const USER_SEARCH_DEFAULT_LIMIT = 100
// UserSearch captures the parameters provided by a client for initiating a user search.
type UserSearch struct {
- Term string `json:"term"`
- TeamId string `json:"team_id"`
- NotInTeamId string `json:"not_in_team_id"`
- InChannelId string `json:"in_channel_id"`
- NotInChannelId string `json:"not_in_channel_id"`
- GroupConstrained bool `json:"group_constrained"`
- AllowInactive bool `json:"allow_inactive"`
- WithoutTeam bool `json:"without_team"`
- Limit int `json:"limit"`
- Role string `json:"role"`
+ Term string `json:"term"`
+ TeamId string `json:"team_id"`
+ NotInTeamId string `json:"not_in_team_id"`
+ InChannelId string `json:"in_channel_id"`
+ NotInChannelId string `json:"not_in_channel_id"`
+ InGroupId string `json:"in_group_id"`
+ GroupConstrained bool `json:"group_constrained"`
+ AllowInactive bool `json:"allow_inactive"`
+ WithoutTeam bool `json:"without_team"`
+ Limit int `json:"limit"`
+ Role string `json:"role"`
+ Roles []string `json:"roles"`
+ ChannelRoles []string `json:"channel_roles"`
+ TeamRoles []string `json:"team_roles"`
}
// ToJson convert a User to a json string
@@ -60,6 +64,12 @@ type UserSearchOptions struct {
Limit int
// Filters for the given role
Role string
+ // Filters for users that have any of the given system roles
+ Roles []string
+ // Filters for users that have the given channel roles to be used when searching in a channel
+ ChannelRoles []string
+ // Filters for users that have the given team roles to be used when searching in a team
+ TeamRoles []string
// Restrict to search in a list of teams and channels
ViewRestrictions *ViewUsersRestrictions
// List of allowed channels
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/utils.go b/vendor/github.com/mattermost/mattermost-server/v5/model/utils.go
index e75fb022..2ab71090 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/utils.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/utils.go
@@ -659,12 +659,12 @@ func AsStringBoolMap(list []string) map[string]bool {
// SanitizeUnicode will remove undesirable Unicode characters from a string.
func SanitizeUnicode(s string) string {
- return strings.Map(filterBlacklist, s)
+ return strings.Map(filterBlocklist, s)
}
-// filterBlacklist returns `r` if it is not in the blacklist, otherwise drop (-1).
-// Blacklist is taken from https://www.w3.org/TR/unicode-xml/#Charlist
-func filterBlacklist(r rune) rune {
+// filterBlocklist returns `r` if it is not in the blocklist, otherwise drop (-1).
+// Blocklist is taken from https://www.w3.org/TR/unicode-xml/#Charlist
+func filterBlocklist(r rune) rune {
const drop = -1
switch r {
case '\u0340', '\u0341': // clones of grave and acute; deprecated in Unicode
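
For illustration only (not part of the upstream change): SanitizeUnicode keeps its behaviour under the blacklist-to-blocklist rename, dropping runes on the blocklist; the input string is invented.

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v5/model"
)

func main() {
	// U+0340 is on the blocklist (a deprecated clone of the grave accent) and is dropped.
	fmt.Println(model.SanitizeUnicode("caf\u0340e")) // "cafe"
}
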
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/version.go b/vendor/github.com/mattermost/mattermost-server/v5/model/version.go
index 3cefff8c..11e0427a 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/version.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/version.go
@@ -13,6 +13,9 @@ import (
// It should be maintained in chronological order with most current
// release at the front of the list.
var versions = []string{
+ "5.28.0",
+ "5.27.0",
+ "5.26.0",
"5.25.0",
"5.24.0",
"5.23.0",
diff --git a/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_message.go b/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_message.go
index b3e4b186..281b50cf 100644
--- a/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_message.go
+++ b/vendor/github.com/mattermost/mattermost-server/v5/model/websocket_message.go
@@ -62,6 +62,12 @@ const (
WEBSOCKET_EVENT_RECEIVED_GROUP_NOT_ASSOCIATED_TO_TEAM = "received_group_not_associated_to_team"
WEBSOCKET_EVENT_RECEIVED_GROUP_ASSOCIATED_TO_CHANNEL = "received_group_associated_to_channel"
WEBSOCKET_EVENT_RECEIVED_GROUP_NOT_ASSOCIATED_TO_CHANNEL = "received_group_not_associated_to_channel"
+ WEBSOCKET_EVENT_SIDEBAR_CATEGORY_CREATED = "sidebar_category_created"
+ WEBSOCKET_EVENT_SIDEBAR_CATEGORY_UPDATED = "sidebar_category_updated"
+ WEBSOCKET_EVENT_SIDEBAR_CATEGORY_DELETED = "sidebar_category_deleted"
+ WEBSOCKET_EVENT_SIDEBAR_CATEGORY_ORDER_UPDATED = "sidebar_category_order_updated"
+ WEBSOCKET_WARN_METRIC_STATUS_RECEIVED = "warn_metric_status_received"
+ WEBSOCKET_WARN_METRIC_STATUS_REMOVED = "warn_metric_status_removed"
)
type WebSocketMessage interface {