Diffstat (limited to 'vendor')
267 files changed, 36147 insertions, 5565 deletions
diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go new file mode 100644 index 00000000..102538bd --- /dev/null +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -0,0 +1,77 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "encoding/base64" + "errors" + "net" + "net/http" + "net/url" + "strings" +) + +type netDialerFunc func(netowrk, addr string) (net.Conn, error) + +func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { + return fn(network, addr) +} + +func init() { + proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { + return &httpProxyDialer{proxyURL: proxyURL, fowardDial: forwardDialer.Dial}, nil + }) +} + +type httpProxyDialer struct { + proxyURL *url.URL + fowardDial func(network, addr string) (net.Conn, error) +} + +func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { + hostPort, _ := hostPortNoPort(hpd.proxyURL) + conn, err := hpd.fowardDial(network, hostPort) + if err != nil { + return nil, err + } + + connectHeader := make(http.Header) + if user := hpd.proxyURL.User; user != nil { + proxyUser := user.Username() + if proxyPassword, passwordSet := user.Password(); passwordSet { + credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) + connectHeader.Set("Proxy-Authorization", "Basic "+credential) + } + } + + connectReq := &http.Request{ + Method: "CONNECT", + URL: &url.URL{Opaque: addr}, + Host: addr, + Header: connectHeader, + } + + if err := connectReq.Write(conn); err != nil { + conn.Close() + return nil, err + } + + // Read response. It's OK to use and discard buffered reader here becaue + // the remote server does not speak until spoken to. + br := bufio.NewReader(conn) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + conn.Close() + return nil, err + } + + if resp.StatusCode != 200 { + conn.Close() + f := strings.SplitN(resp.Status, " ", 2) + return nil, errors.New(f[1]) + } + return conn, nil +} diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go new file mode 100644 index 00000000..2e668f6b --- /dev/null +++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go @@ -0,0 +1,473 @@ +// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. +//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy + +// Package proxy provides support for a variety of protocols to proxy network +// data. +// + +package websocket + +import ( + "errors" + "io" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" +) + +type proxy_direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var proxy_Direct = proxy_direct{} + +func (proxy_direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type proxy_PerHost struct { + def, bypass proxy_Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. 
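Reviewer note (illustrative, not part of the patch): the vendored proxy.go above registers an "http" proxy dialer type, so a websocket.Dialer whose Proxy field yields an http:// URL now tunnels its handshake through an HTTP CONNECT proxy and sends Proxy-Authorization when the URL carries credentials. A minimal usage sketch; the proxy address and endpoint URL below are hypothetical:

package main

import (
	"log"
	"net/http"
	"net/url"

	"github.com/gorilla/websocket"
)

func main() {
	// Hypothetical HTTP CONNECT proxy with basic-auth credentials.
	proxyURL, err := url.Parse("http://user:secret@proxy.internal:3128")
	if err != nil {
		log.Fatal(err)
	}

	dialer := websocket.Dialer{
		// http.ProxyURL always returns this proxy; http.ProxyFromEnvironment
		// would honor HTTP_PROXY / NO_PROXY instead.
		Proxy: http.ProxyURL(proxyURL),
	}

	// Hypothetical endpoint; the handshake is tunneled through the proxy.
	conn, _, err := dialer.Dial("wss://echo.example.com/ws", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.WriteMessage(websocket.TextMessage, []byte("ping")); err != nil {
		log.Fatal(err)
	}
}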
+func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { + return &proxy_PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *proxy_PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *proxy_PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *proxy_PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *proxy_PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} + +// A Dialer is a means to establish a connection. +type proxy_Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. 
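Reviewer note (illustrative, not part of the patch): proxy_PerHost above is a bundled, renamed copy of golang.org/x/net/proxy.PerHost, so the exported upstream package shows the same bypass semantics that AddFromString parses. A sketch against the upstream package, assuming a hypothetical SOCKS proxy at socks.internal:1080:

package main

import (
	"log"

	"golang.org/x/net/proxy"
)

func main() {
	// Hypothetical SOCKS5 proxy used as the default route.
	socks, err := proxy.SOCKS5("tcp", "socks.internal:1080", nil, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}

	// Everything dials through the proxy except localhost, 10.0.0.0/8 and
	// *.example.com, which go direct (the same rule syntax AddFromString parses).
	perHost := proxy.NewPerHost(socks, proxy.Direct)
	perHost.AddFromString("localhost,10.0.0.0/8,*.example.com")

	// Matches the *.example.com zone, so this connection dials direct.
	conn, err := perHost.Dial("tcp", "intranet.example.com:443")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}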
+type proxy_Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. +func proxy_FromEnvironment() proxy_Dialer { + allProxy := proxy_allProxyEnv.Get() + if len(allProxy) == 0 { + return proxy_Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return proxy_Direct + } + proxy, err := proxy_FromURL(proxyURL, proxy_Direct) + if err != nil { + return proxy_Direct + } + + noProxy := proxy_noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := proxy_NewPerHost(proxy, proxy_Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { + if proxy_proxySchemes == nil { + proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) + } + proxy_proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { + var auth *proxy_Auth + if u.User != nil { + auth = new(proxy_Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return proxy_SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxy_proxySchemes != nil { + if f, ok := proxy_proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + proxy_allProxyEnv = &proxy_envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + proxy_noProxyEnv = &proxy_envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type proxy_envOnce struct { + names []string + once sync.Once + val string +} + +func (e *proxy_envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *proxy_envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928 and RFC 1929. 
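Reviewer note (illustrative, not part of the patch): proxy_FromEnvironment above mirrors golang.org/x/net/proxy.FromEnvironment, reading ALL_PROXY for the dialer and NO_PROXY for bypass rules. A sketch against the upstream package; the proxy host is hypothetical:

package main

import (
	"log"
	"os"

	"golang.org/x/net/proxy"
)

func main() {
	// ALL_PROXY picks the dialer, NO_PROXY lists bypass rules; the env lookups
	// are cached after the first call, so set them before FromEnvironment runs.
	os.Setenv("ALL_PROXY", "socks5://socks.internal:1080") // hypothetical proxy
	os.Setenv("NO_PROXY", "127.0.0.1,*.corp.example.com")

	dialer := proxy.FromEnvironment()
	conn, err := dialer.Dial("tcp", "api.example.com:443")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}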
+func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { + s := &proxy_socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type proxy_socks5 struct { + user, password string + network, addr string + forward proxy_Dialer +} + +const proxy_socks5Version = 5 + +const ( + proxy_socks5AuthNone = 0 + proxy_socks5AuthPassword = 2 +) + +const proxy_socks5Connect = 1 + +const ( + proxy_socks5IP4 = 1 + proxy_socks5Domain = 3 + proxy_socks5IP6 = 4 +) + +var proxy_socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the given network via the SOCKS5 proxy. +func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} + +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. +func (s *proxy_socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) + if err != nil { + return err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, proxy_socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + // See RFC 1929 + if buf[1] == proxy_socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) 
+ + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, proxy_socks5IP4) + ip = ip4 + } else { + buf = append(buf, proxy_socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return errors.New("proxy: destination host name too long: " + host) + } + buf = append(buf, proxy_socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) + } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(proxy_socks5Errors) { + failure = proxy_socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case proxy_socks5IP4: + bytesToDiscard = net.IPv4len + case proxy_socks5IP6: + bytesToDiscard = net.IPv6len + case proxy_socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + default: + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + return nil +} diff --git a/vendor/github.com/mattermost/platform/einterfaces/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/einterfaces/LICENSE.txt index ead98cf0..ead98cf0 100644 --- a/vendor/github.com/mattermost/platform/einterfaces/LICENSE.txt +++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/LICENSE.txt diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/account_migration.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/account_migration.go new file mode 100644 index 00000000..0db516d7 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/account_migration.go @@ -0,0 +1,10 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
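Reviewer note (illustrative, not part of the patch): the connect() method above assembles RFC 1928 messages by hand. This self-contained sketch reproduces the layout of the CONNECT request it sends for a domain-name target, which may help when reading the byte-slice manipulation:

package main

import "fmt"

// socks5ConnectRequest builds the RFC 1928 CONNECT request for a domain-name
// target, mirroring what connect() writes after method negotiation.
func socks5ConnectRequest(host string, port int) []byte {
	buf := []byte{
		5,               // SOCKS version
		1,               // CONNECT command
		0,               // reserved
		3,               // address type: domain name
		byte(len(host)), // domain length, then the domain itself
	}
	buf = append(buf, host...)
	buf = append(buf, byte(port>>8), byte(port)) // port, big-endian
	return buf
}

func main() {
	fmt.Printf("% x\n", socks5ConnectRequest("example.com", 443))
	// Output: 05 01 00 03 0b 65 78 61 6d 70 6c 65 2e 63 6f 6d 01 bb
}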
+ +package einterfaces + +import "github.com/mattermost/mattermost-server/model" + +type AccountMigrationInterface interface { + MigrateToLdap(fromAuthService string, forignUserFieldNameToMatch string, force bool) *model.AppError +} diff --git a/vendor/github.com/mattermost/platform/einterfaces/brand.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/brand.go index f4e16eb0..fc584a91 100644 --- a/vendor/github.com/mattermost/platform/einterfaces/brand.go +++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/brand.go @@ -4,7 +4,7 @@ package einterfaces import ( - "github.com/mattermost/platform/model" + "github.com/mattermost/mattermost-server/model" "mime/multipart" ) @@ -12,13 +12,3 @@ type BrandInterface interface { SaveBrandImage(*multipart.FileHeader) *model.AppError GetBrandImage() ([]byte, *model.AppError) } - -var theBrandInterface BrandInterface - -func RegisterBrandInterface(newInterface BrandInterface) { - theBrandInterface = newInterface -} - -func GetBrandInterface() BrandInterface { - return theBrandInterface -} diff --git a/vendor/github.com/mattermost/platform/einterfaces/cluster.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/cluster.go index 096a775f..b5ef4772 100644 --- a/vendor/github.com/mattermost/platform/einterfaces/cluster.go +++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/cluster.go @@ -4,7 +4,7 @@ package einterfaces import ( - "github.com/mattermost/platform/model" + "github.com/mattermost/mattermost-server/model" ) type ClusterMessageHandler func(msg *model.ClusterMessage) @@ -14,6 +14,8 @@ type ClusterInterface interface { StopInterNodeCommunication() RegisterClusterMessageHandler(event string, crm ClusterMessageHandler) GetClusterId() string + IsLeader() bool + GetMyClusterInfo() *model.ClusterInfo GetClusterInfos() []*model.ClusterInfo SendClusterMessage(cluster *model.ClusterMessage) NotifyMsg(buf []byte) @@ -21,13 +23,3 @@ type ClusterInterface interface { GetLogs(page, perPage int) ([]string, *model.AppError) ConfigChanged(previousConfig *model.Config, newConfig *model.Config, sendToOtherServer bool) *model.AppError } - -var theClusterInterface ClusterInterface - -func RegisterClusterInterface(newInterface ClusterInterface) { - theClusterInterface = newInterface -} - -func GetClusterInterface() ClusterInterface { - return theClusterInterface -} diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/compliance.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/compliance.go new file mode 100644 index 00000000..14927bee --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/compliance.go @@ -0,0 +1,13 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package einterfaces + +import ( + "github.com/mattermost/mattermost-server/model" +) + +type ComplianceInterface interface { + StartComplianceDailyJob() + RunComplianceJob(job *model.Compliance) *model.AppError +} diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/data_retention.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/data_retention.go new file mode 100644 index 00000000..07f7d387 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/data_retention.go @@ -0,0 +1,12 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package einterfaces + +import ( + "github.com/mattermost/mattermost-server/model" +) + +type DataRetentionInterface interface { + GetPolicy() (*model.DataRetentionPolicy, *model.AppError) +} diff --git a/vendor/github.com/mattermost/platform/einterfaces/elasticsearch.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/elasticsearch.go index 011b259b..5582fd4e 100644 --- a/vendor/github.com/mattermost/platform/einterfaces/elasticsearch.go +++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/elasticsearch.go @@ -3,7 +3,11 @@ package einterfaces -import "github.com/mattermost/platform/model" +import ( + "time" + + "github.com/mattermost/mattermost-server/model" +) type ElasticsearchInterface interface { Start() *model.AppError @@ -12,14 +16,5 @@ type ElasticsearchInterface interface { DeletePost(post *model.Post) *model.AppError TestConfig(cfg *model.Config) *model.AppError PurgeIndexes() *model.AppError -} - -var theElasticsearchInterface ElasticsearchInterface - -func RegisterElasticsearchInterface(newInterface ElasticsearchInterface) { - theElasticsearchInterface = newInterface -} - -func GetElasticsearchInterface() ElasticsearchInterface { - return theElasticsearchInterface + DataRetentionDeleteIndexes(cutoff time.Time) *model.AppError } diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/emoji.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/emoji.go new file mode 100644 index 00000000..b8d61e74 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/emoji.go @@ -0,0 +1,12 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package einterfaces + +import ( + "github.com/mattermost/mattermost-server/model" +) + +type EmojiInterface interface { + CanUserCreateEmoji(string, []*model.TeamMember) bool +} diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/data_retention.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/data_retention.go new file mode 100644 index 00000000..73f78e4f --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/data_retention.go @@ -0,0 +1,13 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package jobs + +import ( + "github.com/mattermost/mattermost-server/model" +) + +type DataRetentionJobInterface interface { + MakeWorker() model.Worker + MakeScheduler() model.Scheduler +} diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/elasticsearch.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/elasticsearch.go new file mode 100644 index 00000000..16e0d769 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/elasticsearch.go @@ -0,0 +1,17 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package jobs + +import ( + "github.com/mattermost/mattermost-server/model" +) + +type ElasticsearchIndexerInterface interface { + MakeWorker() model.Worker +} + +type ElasticsearchAggregatorInterface interface { + MakeWorker() model.Worker + MakeScheduler() model.Scheduler +} diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/ldap_sync.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/ldap_sync.go new file mode 100644 index 00000000..5565afe4 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/ldap_sync.go @@ -0,0 +1,13 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package jobs + +import ( + "github.com/mattermost/mattermost-server/model" +) + +type LdapSyncInterface interface { + MakeWorker() model.Worker + MakeScheduler() model.Scheduler +} diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/message_export.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/message_export.go new file mode 100644 index 00000000..74b0df75 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/jobs/message_export.go @@ -0,0 +1,13 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package jobs + +import ( + "github.com/mattermost/mattermost-server/model" +) + +type MessageExportJobInterface interface { + MakeWorker() model.Worker + MakeScheduler() model.Scheduler +} diff --git a/vendor/github.com/mattermost/platform/einterfaces/ldap.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/ldap.go index 721c8d30..26326b17 100644 --- a/vendor/github.com/mattermost/platform/einterfaces/ldap.go +++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/ldap.go @@ -4,28 +4,22 @@ package einterfaces import ( - "github.com/mattermost/platform/model" + "github.com/go-ldap/ldap" + + "github.com/mattermost/mattermost-server/model" ) type LdapInterface interface { DoLogin(id string, password string) (*model.User, *model.AppError) GetUser(id string) (*model.User, *model.AppError) + GetUserAttributes(id string, attributes []string) (map[string]string, *model.AppError) CheckPassword(id string, password string) *model.AppError SwitchToLdap(userId, ldapId, ldapPassword string) *model.AppError ValidateFilter(filter string) *model.AppError - Syncronize() *model.AppError - StartLdapSyncJob() - SyncNow() + StartSynchronizeJob(waitForJobToFinish bool) (*model.Job, *model.AppError) RunTest() *model.AppError GetAllLdapUsers() ([]*model.User, *model.AppError) -} - -var theLdapInterface LdapInterface - -func RegisterLdapInterface(newInterface LdapInterface) { - theLdapInterface = newInterface -} - -func GetLdapInterface() LdapInterface { - return theLdapInterface + UserFromLdapUser(ldapUser *ldap.Entry) *model.User + UserHasUpdateFromLdap(existingUser *model.User, currentLdapUser *model.User) bool + UpdateLocalLdapUser(existingUser *model.User, currentLdapUser *model.User) *model.User } diff --git a/vendor/github.com/mattermost/mattermost-server/einterfaces/message_export.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/message_export.go new file mode 100644 index 00000000..ba498cdf --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/message_export.go @@ -0,0 +1,14 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package einterfaces + +import ( + "context" + + "github.com/mattermost/mattermost-server/model" +) + +type MessageExportInterface interface { + StartSynchronizeJob(ctx context.Context, exportFromTimestamp int64) (*model.Job, *model.AppError) +} diff --git a/vendor/github.com/mattermost/platform/einterfaces/metrics.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/metrics.go index e1ef9c15..a88fe63c 100644 --- a/vendor/github.com/mattermost/platform/einterfaces/metrics.go +++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/metrics.go @@ -37,14 +37,7 @@ type MetricsInterface interface { AddMemCacheHitCounter(cacheName string, amount float64) AddMemCacheMissCounter(cacheName string, amount float64) -} - -var theMetricsInterface MetricsInterface - -func RegisterMetricsInterface(newInterface MetricsInterface) { - theMetricsInterface = newInterface -} -func GetMetricsInterface() MetricsInterface { - return theMetricsInterface + IncrementPostsSearchCounter() + ObservePostsSearchDuration(elapsed float64) } diff --git a/vendor/github.com/mattermost/platform/einterfaces/mfa.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/mfa.go index e1b6ecbc..3afe961e 100644 --- a/vendor/github.com/mattermost/platform/einterfaces/mfa.go +++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/mfa.go @@ -4,7 +4,7 @@ package einterfaces import ( - "github.com/mattermost/platform/model" + "github.com/mattermost/mattermost-server/model" ) type MfaInterface interface { @@ -13,13 +13,3 @@ type MfaInterface interface { Deactivate(userId string) *model.AppError ValidateToken(secret, token string) (bool, *model.AppError) } - -var theMfaInterface MfaInterface - -func RegisterMfaInterface(newInterface MfaInterface) { - theMfaInterface = newInterface -} - -func GetMfaInterface() MfaInterface { - return theMfaInterface -} diff --git a/vendor/github.com/mattermost/platform/einterfaces/oauthproviders.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/oauthproviders.go index b58e8307..7e24d2a7 100644 --- a/vendor/github.com/mattermost/platform/einterfaces/oauthproviders.go +++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/oauthproviders.go @@ -4,7 +4,7 @@ package einterfaces import ( - "github.com/mattermost/platform/model" + "github.com/mattermost/mattermost-server/model" "io" ) diff --git a/vendor/github.com/mattermost/platform/einterfaces/saml.go b/vendor/github.com/mattermost/mattermost-server/einterfaces/saml.go index 3e020891..833a3d43 100644 --- a/vendor/github.com/mattermost/platform/einterfaces/saml.go +++ b/vendor/github.com/mattermost/mattermost-server/einterfaces/saml.go @@ -4,7 +4,7 @@ package einterfaces import ( - "github.com/mattermost/platform/model" + "github.com/mattermost/mattermost-server/model" ) type SamlInterface interface { @@ -13,13 +13,3 @@ type SamlInterface interface { DoLogin(encodedXML string, relayState map[string]string) (*model.User, *model.AppError) GetMetadata() (string, *model.AppError) } - -var theSamlInterface SamlInterface - -func RegisterSamlInterface(newInterface SamlInterface) { - theSamlInterface = newInterface -} - -func GetSamlInterface() SamlInterface { - return theSamlInterface -} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/model/LICENSE.txt index ead98cf0..ead98cf0 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/LICENSE.txt +++ 
b/vendor/github.com/mattermost/mattermost-server/model/LICENSE.txt diff --git a/vendor/github.com/mattermost/mattermost-server/model/access.go b/vendor/github.com/mattermost/mattermost-server/model/access.go new file mode 100644 index 00000000..e9603c78 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/access.go @@ -0,0 +1,96 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" + "net/http" +) + +const ( + ACCESS_TOKEN_GRANT_TYPE = "authorization_code" + ACCESS_TOKEN_TYPE = "bearer" + REFRESH_TOKEN_GRANT_TYPE = "refresh_token" +) + +type AccessData struct { + ClientId string `json:"client_id"` + UserId string `json:"user_id"` + Token string `json:"token"` + RefreshToken string `json:"refresh_token"` + RedirectUri string `json:"redirect_uri"` + ExpiresAt int64 `json:"expires_at"` + Scope string `json:"scope"` +} + +type AccessResponse struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int32 `json:"expires_in"` + Scope string `json:"scope"` + RefreshToken string `json:"refresh_token"` +} + +// IsValid validates the AccessData and returns an error if it isn't configured +// correctly. +func (ad *AccessData) IsValid() *AppError { + + if len(ad.ClientId) == 0 || len(ad.ClientId) > 26 { + return NewAppError("AccessData.IsValid", "model.access.is_valid.client_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(ad.UserId) == 0 || len(ad.UserId) > 26 { + return NewAppError("AccessData.IsValid", "model.access.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(ad.Token) != 26 { + return NewAppError("AccessData.IsValid", "model.access.is_valid.access_token.app_error", nil, "", http.StatusBadRequest) + } + + if len(ad.RefreshToken) > 26 { + return NewAppError("AccessData.IsValid", "model.access.is_valid.refresh_token.app_error", nil, "", http.StatusBadRequest) + } + + if len(ad.RedirectUri) == 0 || len(ad.RedirectUri) > 256 || !IsValidHttpUrl(ad.RedirectUri) { + return NewAppError("AccessData.IsValid", "model.access.is_valid.redirect_uri.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (me *AccessData) IsExpired() bool { + + if me.ExpiresAt <= 0 { + return false + } + + if GetMillis() > me.ExpiresAt { + return true + } + + return false +} + +func (ad *AccessData) ToJson() string { + b, _ := json.Marshal(ad) + return string(b) +} + +func AccessDataFromJson(data io.Reader) *AccessData { + var ad *AccessData + json.NewDecoder(data).Decode(&ad) + return ad +} + +func (ar *AccessResponse) ToJson() string { + b, _ := json.Marshal(ar) + return string(b) +} + +func AccessResponseFromJson(data io.Reader) *AccessResponse { + var ar *AccessResponse + json.NewDecoder(data).Decode(&ar) + return ar +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/analytics_row.go b/vendor/github.com/mattermost/mattermost-server/model/analytics_row.go new file mode 100644 index 00000000..4615bb79 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/analytics_row.go @@ -0,0 +1,41 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package model + +import ( + "encoding/json" + "io" +) + +type AnalyticsRow struct { + Name string `json:"name"` + Value float64 `json:"value"` +} + +type AnalyticsRows []*AnalyticsRow + +func (me *AnalyticsRow) ToJson() string { + b, _ := json.Marshal(me) + return string(b) +} + +func AnalyticsRowFromJson(data io.Reader) *AnalyticsRow { + var me *AnalyticsRow + json.NewDecoder(data).Decode(&me) + return me +} + +func (me AnalyticsRows) ToJson() string { + if b, err := json.Marshal(me); err != nil { + return "[]" + } else { + return string(b) + } +} + +func AnalyticsRowsFromJson(data io.Reader) AnalyticsRows { + var me AnalyticsRows + json.NewDecoder(data).Decode(&me) + return me +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/audit.go b/vendor/github.com/mattermost/mattermost-server/model/audit.go new file mode 100644 index 00000000..e3d1bdf9 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/audit.go @@ -0,0 +1,30 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type Audit struct { + Id string `json:"id"` + CreateAt int64 `json:"create_at"` + UserId string `json:"user_id"` + Action string `json:"action"` + ExtraInfo string `json:"extra_info"` + IpAddress string `json:"ip_address"` + SessionId string `json:"session_id"` +} + +func (o *Audit) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func AuditFromJson(data io.Reader) *Audit { + var o *Audit + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/audits.go b/vendor/github.com/mattermost/mattermost-server/model/audits.go new file mode 100644 index 00000000..3673eb61 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/audits.go @@ -0,0 +1,34 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type Audits []Audit + +func (o Audits) Etag() string { + if len(o) > 0 { + // the first in the list is always the most current + return Etag(o[0].CreateAt) + } else { + return "" + } +} + +func (o Audits) ToJson() string { + if b, err := json.Marshal(o); err != nil { + return "[]" + } else { + return string(b) + } +} + +func AuditsFromJson(data io.Reader) Audits { + var o Audits + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/authorization.go b/vendor/github.com/mattermost/mattermost-server/model/authorization.go new file mode 100644 index 00000000..9f4e36ea --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/authorization.go @@ -0,0 +1,522 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
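Reviewer note (illustrative, not part of the patch): the model types added here all follow the same ToJson / *FromJson convention built on encoding/json. A round-trip sketch using AnalyticsRow, assuming the vendored model package is importable as named in the diff:

package main

import (
	"fmt"
	"strings"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	row := &model.AnalyticsRow{Name: "daily_active_users", Value: 42}

	// Serialize with the type's ToJson helper...
	payload := row.ToJson()

	// ...and decode it back with the matching FromJson function.
	decoded := model.AnalyticsRowFromJson(strings.NewReader(payload))
	fmt.Println(decoded.Name, decoded.Value) // daily_active_users 42
}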
+ +package model + +type Permission struct { + Id string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` +} + +type Role struct { + Id string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Permissions []string `json:"permissions"` +} + +var PERMISSION_INVITE_USER *Permission +var PERMISSION_ADD_USER_TO_TEAM *Permission +var PERMISSION_USE_SLASH_COMMANDS *Permission +var PERMISSION_MANAGE_SLASH_COMMANDS *Permission +var PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS *Permission +var PERMISSION_CREATE_PUBLIC_CHANNEL *Permission +var PERMISSION_CREATE_PRIVATE_CHANNEL *Permission +var PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS *Permission +var PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS *Permission +var PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE *Permission +var PERMISSION_MANAGE_ROLES *Permission +var PERMISSION_MANAGE_TEAM_ROLES *Permission +var PERMISSION_MANAGE_CHANNEL_ROLES *Permission +var PERMISSION_CREATE_DIRECT_CHANNEL *Permission +var PERMISSION_CREATE_GROUP_CHANNEL *Permission +var PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES *Permission +var PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES *Permission +var PERMISSION_LIST_TEAM_CHANNELS *Permission +var PERMISSION_JOIN_PUBLIC_CHANNELS *Permission +var PERMISSION_DELETE_PUBLIC_CHANNEL *Permission +var PERMISSION_DELETE_PRIVATE_CHANNEL *Permission +var PERMISSION_EDIT_OTHER_USERS *Permission +var PERMISSION_READ_CHANNEL *Permission +var PERMISSION_READ_PUBLIC_CHANNEL *Permission +var PERMISSION_PERMANENT_DELETE_USER *Permission +var PERMISSION_UPLOAD_FILE *Permission +var PERMISSION_GET_PUBLIC_LINK *Permission +var PERMISSION_MANAGE_WEBHOOKS *Permission +var PERMISSION_MANAGE_OTHERS_WEBHOOKS *Permission +var PERMISSION_MANAGE_OAUTH *Permission +var PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH *Permission +var PERMISSION_CREATE_POST *Permission +var PERMISSION_CREATE_POST_PUBLIC *Permission +var PERMISSION_EDIT_POST *Permission +var PERMISSION_EDIT_OTHERS_POSTS *Permission +var PERMISSION_DELETE_POST *Permission +var PERMISSION_DELETE_OTHERS_POSTS *Permission +var PERMISSION_REMOVE_USER_FROM_TEAM *Permission +var PERMISSION_CREATE_TEAM *Permission +var PERMISSION_MANAGE_TEAM *Permission +var PERMISSION_IMPORT_TEAM *Permission +var PERMISSION_VIEW_TEAM *Permission +var PERMISSION_LIST_USERS_WITHOUT_TEAM *Permission +var PERMISSION_MANAGE_JOBS *Permission +var PERMISSION_CREATE_USER_ACCESS_TOKEN *Permission +var PERMISSION_READ_USER_ACCESS_TOKEN *Permission +var PERMISSION_REVOKE_USER_ACCESS_TOKEN *Permission + +// General permission that encompases all system admin functions +// in the future this could be broken up to allow access to some +// admin functions but not others +var PERMISSION_MANAGE_SYSTEM *Permission + +const ( + SYSTEM_USER_ROLE_ID = "system_user" + SYSTEM_ADMIN_ROLE_ID = "system_admin" + SYSTEM_POST_ALL_ROLE_ID = "system_post_all" + SYSTEM_POST_ALL_PUBLIC_ROLE_ID = "system_post_all_public" + SYSTEM_USER_ACCESS_TOKEN_ROLE_ID = "system_user_access_token" + + TEAM_USER_ROLE_ID = "team_user" + TEAM_ADMIN_ROLE_ID = "team_admin" + TEAM_POST_ALL_ROLE_ID = "team_post_all" + TEAM_POST_ALL_PUBLIC_ROLE_ID = "team_post_all_public" + + CHANNEL_USER_ROLE_ID = "channel_user" + CHANNEL_ADMIN_ROLE_ID = "channel_admin" + CHANNEL_GUEST_ROLE_ID = "guest" +) + +func initializePermissions() { + PERMISSION_INVITE_USER = &Permission{ + "invite_user", + "authentication.permissions.team_invite_user.name", + "authentication.permissions.team_invite_user.description", + } + PERMISSION_ADD_USER_TO_TEAM = 
&Permission{ + "add_user_to_team", + "authentication.permissions.add_user_to_team.name", + "authentication.permissions.add_user_to_team.description", + } + PERMISSION_USE_SLASH_COMMANDS = &Permission{ + "use_slash_commands", + "authentication.permissions.team_use_slash_commands.name", + "authentication.permissions.team_use_slash_commands.description", + } + PERMISSION_MANAGE_SLASH_COMMANDS = &Permission{ + "manage_slash_commands", + "authentication.permissions.manage_slash_commands.name", + "authentication.permissions.manage_slash_commands.description", + } + PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS = &Permission{ + "manage_others_slash_commands", + "authentication.permissions.manage_others_slash_commands.name", + "authentication.permissions.manage_others_slash_commands.description", + } + PERMISSION_CREATE_PUBLIC_CHANNEL = &Permission{ + "create_public_channel", + "authentication.permissions.create_public_channel.name", + "authentication.permissions.create_public_channel.description", + } + PERMISSION_CREATE_PRIVATE_CHANNEL = &Permission{ + "create_private_channel", + "authentication.permissions.create_private_channel.name", + "authentication.permissions.create_private_channel.description", + } + PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS = &Permission{ + "manage_public_channel_members", + "authentication.permissions.manage_public_channel_members.name", + "authentication.permissions.manage_public_channel_members.description", + } + PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS = &Permission{ + "manage_private_channel_members", + "authentication.permissions.manage_private_channel_members.name", + "authentication.permissions.manage_private_channel_members.description", + } + PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE = &Permission{ + "assign_system_admin_role", + "authentication.permissions.assign_system_admin_role.name", + "authentication.permissions.assign_system_admin_role.description", + } + PERMISSION_MANAGE_ROLES = &Permission{ + "manage_roles", + "authentication.permissions.manage_roles.name", + "authentication.permissions.manage_roles.description", + } + PERMISSION_MANAGE_TEAM_ROLES = &Permission{ + "manage_team_roles", + "authentication.permissions.manage_team_roles.name", + "authentication.permissions.manage_team_roles.description", + } + PERMISSION_MANAGE_CHANNEL_ROLES = &Permission{ + "manage_channel_roles", + "authentication.permissions.manage_channel_roles.name", + "authentication.permissions.manage_channel_roles.description", + } + PERMISSION_MANAGE_SYSTEM = &Permission{ + "manage_system", + "authentication.permissions.manage_system.name", + "authentication.permissions.manage_system.description", + } + PERMISSION_CREATE_DIRECT_CHANNEL = &Permission{ + "create_direct_channel", + "authentication.permissions.create_direct_channel.name", + "authentication.permissions.create_direct_channel.description", + } + PERMISSION_CREATE_GROUP_CHANNEL = &Permission{ + "create_group_channel", + "authentication.permissions.create_group_channel.name", + "authentication.permissions.create_group_channel.description", + } + PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES = &Permission{ + "manage__publicchannel_properties", + "authentication.permissions.manage_public_channel_properties.name", + "authentication.permissions.manage_public_channel_properties.description", + } + PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES = &Permission{ + "manage_private_channel_properties", + "authentication.permissions.manage_private_channel_properties.name", + 
"authentication.permissions.manage_private_channel_properties.description", + } + PERMISSION_LIST_TEAM_CHANNELS = &Permission{ + "list_team_channels", + "authentication.permissions.list_team_channels.name", + "authentication.permissions.list_team_channels.description", + } + PERMISSION_JOIN_PUBLIC_CHANNELS = &Permission{ + "join_public_channels", + "authentication.permissions.join_public_channels.name", + "authentication.permissions.join_public_channels.description", + } + PERMISSION_DELETE_PUBLIC_CHANNEL = &Permission{ + "delete_public_channel", + "authentication.permissions.delete_public_channel.name", + "authentication.permissions.delete_public_channel.description", + } + PERMISSION_DELETE_PRIVATE_CHANNEL = &Permission{ + "delete_private_channel", + "authentication.permissions.delete_private_channel.name", + "authentication.permissions.delete_private_channel.description", + } + PERMISSION_EDIT_OTHER_USERS = &Permission{ + "edit_other_users", + "authentication.permissions.edit_other_users.name", + "authentication.permissions.edit_other_users.description", + } + PERMISSION_READ_CHANNEL = &Permission{ + "read_channel", + "authentication.permissions.read_channel.name", + "authentication.permissions.read_channel.description", + } + PERMISSION_READ_PUBLIC_CHANNEL = &Permission{ + "read_public_channel", + "authentication.permissions.read_public_channel.name", + "authentication.permissions.read_public_channel.description", + } + PERMISSION_PERMANENT_DELETE_USER = &Permission{ + "permanent_delete_user", + "authentication.permissions.permanent_delete_user.name", + "authentication.permissions.permanent_delete_user.description", + } + PERMISSION_UPLOAD_FILE = &Permission{ + "upload_file", + "authentication.permissions.upload_file.name", + "authentication.permissions.upload_file.description", + } + PERMISSION_GET_PUBLIC_LINK = &Permission{ + "get_public_link", + "authentication.permissions.get_public_link.name", + "authentication.permissions.get_public_link.description", + } + PERMISSION_MANAGE_WEBHOOKS = &Permission{ + "manage_webhooks", + "authentication.permissions.manage_webhooks.name", + "authentication.permissions.manage_webhooks.description", + } + PERMISSION_MANAGE_OTHERS_WEBHOOKS = &Permission{ + "manage_others_webhooks", + "authentication.permissions.manage_others_webhooks.name", + "authentication.permissions.manage_others_webhooks.description", + } + PERMISSION_MANAGE_OAUTH = &Permission{ + "manage_oauth", + "authentication.permissions.manage_oauth.name", + "authentication.permissions.manage_oauth.description", + } + PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH = &Permission{ + "manage_sytem_wide_oauth", + "authentication.permissions.manage_sytem_wide_oauth.name", + "authentication.permissions.manage_sytem_wide_oauth.description", + } + PERMISSION_CREATE_POST = &Permission{ + "create_post", + "authentication.permissions.create_post.name", + "authentication.permissions.create_post.description", + } + PERMISSION_CREATE_POST_PUBLIC = &Permission{ + "create_post_public", + "authentication.permissions.create_post_public.name", + "authentication.permissions.create_post_public.description", + } + PERMISSION_EDIT_POST = &Permission{ + "edit_post", + "authentication.permissions.edit_post.name", + "authentication.permissions.edit_post.description", + } + PERMISSION_EDIT_OTHERS_POSTS = &Permission{ + "edit_others_posts", + "authentication.permissions.edit_others_posts.name", + "authentication.permissions.edit_others_posts.description", + } + PERMISSION_DELETE_POST = &Permission{ + "delete_post", + 
"authentication.permissions.delete_post.name", + "authentication.permissions.delete_post.description", + } + PERMISSION_DELETE_OTHERS_POSTS = &Permission{ + "delete_others_posts", + "authentication.permissions.delete_others_posts.name", + "authentication.permissions.delete_others_posts.description", + } + PERMISSION_REMOVE_USER_FROM_TEAM = &Permission{ + "remove_user_from_team", + "authentication.permissions.remove_user_from_team.name", + "authentication.permissions.remove_user_from_team.description", + } + PERMISSION_CREATE_TEAM = &Permission{ + "create_team", + "authentication.permissions.create_team.name", + "authentication.permissions.create_team.description", + } + PERMISSION_MANAGE_TEAM = &Permission{ + "manage_team", + "authentication.permissions.manage_team.name", + "authentication.permissions.manage_team.description", + } + PERMISSION_IMPORT_TEAM = &Permission{ + "import_team", + "authentication.permissions.import_team.name", + "authentication.permissions.import_team.description", + } + PERMISSION_VIEW_TEAM = &Permission{ + "view_team", + "authentication.permissions.view_team.name", + "authentication.permissions.view_team.description", + } + PERMISSION_LIST_USERS_WITHOUT_TEAM = &Permission{ + "list_users_without_team", + "authentication.permissions.list_users_without_team.name", + "authentication.permissions.list_users_without_team.description", + } + PERMISSION_CREATE_USER_ACCESS_TOKEN = &Permission{ + "create_user_access_token", + "authentication.permissions.create_user_access_token.name", + "authentication.permissions.create_user_access_token.description", + } + PERMISSION_READ_USER_ACCESS_TOKEN = &Permission{ + "read_user_access_token", + "authentication.permissions.read_user_access_token.name", + "authentication.permissions.read_user_access_token.description", + } + PERMISSION_REVOKE_USER_ACCESS_TOKEN = &Permission{ + "revoke_user_access_token", + "authentication.permissions.revoke_user_access_token.name", + "authentication.permissions.revoke_user_access_token.description", + } + PERMISSION_MANAGE_JOBS = &Permission{ + "manage_jobs", + "authentication.permisssions.manage_jobs.name", + "authentication.permisssions.manage_jobs.description", + } +} + +var DefaultRoles map[string]*Role + +func initializeDefaultRoles() { + DefaultRoles = make(map[string]*Role) + + DefaultRoles[CHANNEL_USER_ROLE_ID] = &Role{ + "channel_user", + "authentication.roles.channel_user.name", + "authentication.roles.channel_user.description", + []string{ + PERMISSION_READ_CHANNEL.Id, + PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id, + PERMISSION_UPLOAD_FILE.Id, + PERMISSION_GET_PUBLIC_LINK.Id, + PERMISSION_CREATE_POST.Id, + PERMISSION_EDIT_POST.Id, + PERMISSION_USE_SLASH_COMMANDS.Id, + }, + } + + DefaultRoles[CHANNEL_ADMIN_ROLE_ID] = &Role{ + "channel_admin", + "authentication.roles.channel_admin.name", + "authentication.roles.channel_admin.description", + []string{ + PERMISSION_MANAGE_CHANNEL_ROLES.Id, + }, + } + + DefaultRoles[CHANNEL_GUEST_ROLE_ID] = &Role{ + "guest", + "authentication.roles.global_guest.name", + "authentication.roles.global_guest.description", + []string{}, + } + + DefaultRoles[TEAM_USER_ROLE_ID] = &Role{ + "team_user", + "authentication.roles.team_user.name", + "authentication.roles.team_user.description", + []string{ + PERMISSION_LIST_TEAM_CHANNELS.Id, + PERMISSION_JOIN_PUBLIC_CHANNELS.Id, + PERMISSION_READ_PUBLIC_CHANNEL.Id, + PERMISSION_VIEW_TEAM.Id, + }, + } + + DefaultRoles[TEAM_POST_ALL_ROLE_ID] = &Role{ + "team_post_all", + "authentication.roles.team_post_all.name", + 
"authentication.roles.team_post_all.description", + []string{ + PERMISSION_CREATE_POST.Id, + }, + } + + DefaultRoles[TEAM_POST_ALL_PUBLIC_ROLE_ID] = &Role{ + "team_post_all_public", + "authentication.roles.team_post_all_public.name", + "authentication.roles.team_post_all_public.description", + []string{ + PERMISSION_CREATE_POST_PUBLIC.Id, + }, + } + + DefaultRoles[TEAM_ADMIN_ROLE_ID] = &Role{ + "team_admin", + "authentication.roles.team_admin.name", + "authentication.roles.team_admin.description", + []string{ + PERMISSION_EDIT_OTHERS_POSTS.Id, + PERMISSION_REMOVE_USER_FROM_TEAM.Id, + PERMISSION_MANAGE_TEAM.Id, + PERMISSION_IMPORT_TEAM.Id, + PERMISSION_MANAGE_TEAM_ROLES.Id, + PERMISSION_MANAGE_CHANNEL_ROLES.Id, + PERMISSION_MANAGE_OTHERS_WEBHOOKS.Id, + PERMISSION_MANAGE_SLASH_COMMANDS.Id, + PERMISSION_MANAGE_OTHERS_SLASH_COMMANDS.Id, + PERMISSION_MANAGE_WEBHOOKS.Id, + }, + } + + DefaultRoles[SYSTEM_USER_ROLE_ID] = &Role{ + "system_user", + "authentication.roles.global_user.name", + "authentication.roles.global_user.description", + []string{ + PERMISSION_CREATE_DIRECT_CHANNEL.Id, + PERMISSION_CREATE_GROUP_CHANNEL.Id, + PERMISSION_PERMANENT_DELETE_USER.Id, + }, + } + + DefaultRoles[SYSTEM_POST_ALL_ROLE_ID] = &Role{ + "system_post_all", + "authentication.roles.system_post_all.name", + "authentication.roles.system_post_all.description", + []string{ + PERMISSION_CREATE_POST.Id, + }, + } + + DefaultRoles[SYSTEM_POST_ALL_PUBLIC_ROLE_ID] = &Role{ + "system_post_all_public", + "authentication.roles.system_post_all_public.name", + "authentication.roles.system_post_all_public.description", + []string{ + PERMISSION_CREATE_POST_PUBLIC.Id, + }, + } + + DefaultRoles[SYSTEM_USER_ACCESS_TOKEN_ROLE_ID] = &Role{ + "system_user_access_token", + "authentication.roles.system_user_access_token.name", + "authentication.roles.system_user_access_token.description", + []string{ + PERMISSION_CREATE_USER_ACCESS_TOKEN.Id, + PERMISSION_READ_USER_ACCESS_TOKEN.Id, + PERMISSION_REVOKE_USER_ACCESS_TOKEN.Id, + }, + } + + DefaultRoles[SYSTEM_ADMIN_ROLE_ID] = &Role{ + "system_admin", + "authentication.roles.global_admin.name", + "authentication.roles.global_admin.description", + // System admins can do anything channel and team admins can do + // plus everything members of teams and channels can do to all teams + // and channels on the system + append( + append( + append( + append( + []string{ + PERMISSION_ASSIGN_SYSTEM_ADMIN_ROLE.Id, + PERMISSION_MANAGE_SYSTEM.Id, + PERMISSION_MANAGE_ROLES.Id, + PERMISSION_MANAGE_PUBLIC_CHANNEL_PROPERTIES.Id, + PERMISSION_MANAGE_PUBLIC_CHANNEL_MEMBERS.Id, + PERMISSION_MANAGE_PRIVATE_CHANNEL_MEMBERS.Id, + PERMISSION_DELETE_PUBLIC_CHANNEL.Id, + PERMISSION_CREATE_PUBLIC_CHANNEL.Id, + PERMISSION_MANAGE_PRIVATE_CHANNEL_PROPERTIES.Id, + PERMISSION_DELETE_PRIVATE_CHANNEL.Id, + PERMISSION_CREATE_PRIVATE_CHANNEL.Id, + PERMISSION_MANAGE_SYSTEM_WIDE_OAUTH.Id, + PERMISSION_MANAGE_OTHERS_WEBHOOKS.Id, + PERMISSION_EDIT_OTHER_USERS.Id, + PERMISSION_MANAGE_OAUTH.Id, + PERMISSION_INVITE_USER.Id, + PERMISSION_DELETE_POST.Id, + PERMISSION_DELETE_OTHERS_POSTS.Id, + PERMISSION_CREATE_TEAM.Id, + PERMISSION_ADD_USER_TO_TEAM.Id, + PERMISSION_LIST_USERS_WITHOUT_TEAM.Id, + PERMISSION_MANAGE_JOBS.Id, + PERMISSION_CREATE_POST_PUBLIC.Id, + PERMISSION_CREATE_USER_ACCESS_TOKEN.Id, + PERMISSION_READ_USER_ACCESS_TOKEN.Id, + PERMISSION_REVOKE_USER_ACCESS_TOKEN.Id, + }, + DefaultRoles[TEAM_USER_ROLE_ID].Permissions..., + ), + DefaultRoles[CHANNEL_USER_ROLE_ID].Permissions..., + ), + 
DefaultRoles[TEAM_ADMIN_ROLE_ID].Permissions..., + ), + DefaultRoles[CHANNEL_ADMIN_ROLE_ID].Permissions..., + ), + } +} + +func RoleIdsToString(roles []string) string { + output := "" + for _, role := range roles { + output += role + ", " + } + + if output == "" { + return "[<NO ROLES>]" + } + + return output[:len(output)-1] +} + +func init() { + initializePermissions() + initializeDefaultRoles() +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/authorize.go b/vendor/github.com/mattermost/mattermost-server/model/authorize.go new file mode 100644 index 00000000..2296e7e2 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/authorize.go @@ -0,0 +1,141 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" + "net/http" +) + +const ( + AUTHCODE_EXPIRE_TIME = 60 * 10 // 10 minutes + AUTHCODE_RESPONSE_TYPE = "code" + DEFAULT_SCOPE = "user" +) + +type AuthData struct { + ClientId string `json:"client_id"` + UserId string `json:"user_id"` + Code string `json:"code"` + ExpiresIn int32 `json:"expires_in"` + CreateAt int64 `json:"create_at"` + RedirectUri string `json:"redirect_uri"` + State string `json:"state"` + Scope string `json:"scope"` +} + +type AuthorizeRequest struct { + ResponseType string `json:"response_type"` + ClientId string `json:"client_id"` + RedirectUri string `json:"redirect_uri"` + Scope string `json:"scope"` + State string `json:"state"` +} + +// IsValid validates the AuthData and returns an error if it isn't configured +// correctly. +func (ad *AuthData) IsValid() *AppError { + + if len(ad.ClientId) != 26 { + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.client_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(ad.UserId) != 26 { + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(ad.Code) == 0 || len(ad.Code) > 128 { + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.auth_code.app_error", nil, "client_id="+ad.ClientId, http.StatusBadRequest) + } + + if ad.ExpiresIn == 0 { + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.expires.app_error", nil, "", http.StatusBadRequest) + } + + if ad.CreateAt <= 0 { + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.create_at.app_error", nil, "client_id="+ad.ClientId, http.StatusBadRequest) + } + + if len(ad.RedirectUri) == 0 || len(ad.RedirectUri) > 256 || !IsValidHttpUrl(ad.RedirectUri) { + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.redirect_uri.app_error", nil, "client_id="+ad.ClientId, http.StatusBadRequest) + } + + if len(ad.State) > 128 { + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.state.app_error", nil, "client_id="+ad.ClientId, http.StatusBadRequest) + } + + if len(ad.Scope) > 128 { + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.scope.app_error", nil, "client_id="+ad.ClientId, http.StatusBadRequest) + } + + return nil +} + +// IsValid validates the AuthorizeRequest and returns an error if it isn't configured +// correctly. 
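Reviewer note (illustrative, not part of the patch): DefaultRoles and the PERMISSION_* variables above are exported and populated in init(), so callers can inspect a built-in role's permission set directly. A small sketch, assuming the vendored model package is importable as named in the diff:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

// hasPermission reports whether a built-in role grants the given permission id.
func hasPermission(roleId, permissionId string) bool {
	role, ok := model.DefaultRoles[roleId]
	if !ok {
		return false
	}
	for _, p := range role.Permissions {
		if p == permissionId {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(hasPermission(model.TEAM_ADMIN_ROLE_ID, model.PERMISSION_MANAGE_TEAM.Id))   // true
	fmt.Println(hasPermission(model.CHANNEL_USER_ROLE_ID, model.PERMISSION_MANAGE_TEAM.Id)) // false
}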
+func (ar *AuthorizeRequest) IsValid() *AppError { + + if len(ar.ClientId) != 26 { + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.client_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(ar.ResponseType) == 0 { + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.response_type.app_error", nil, "", http.StatusBadRequest) + } + + if len(ar.RedirectUri) == 0 || len(ar.RedirectUri) > 256 || !IsValidHttpUrl(ar.RedirectUri) { + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.redirect_uri.app_error", nil, "client_id="+ar.ClientId, http.StatusBadRequest) + } + + if len(ar.State) > 128 { + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.state.app_error", nil, "client_id="+ar.ClientId, http.StatusBadRequest) + } + + if len(ar.Scope) > 128 { + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.scope.app_error", nil, "client_id="+ar.ClientId, http.StatusBadRequest) + } + + return nil +} + +func (ad *AuthData) PreSave() { + if ad.ExpiresIn == 0 { + ad.ExpiresIn = AUTHCODE_EXPIRE_TIME + } + + if ad.CreateAt == 0 { + ad.CreateAt = GetMillis() + } + + if len(ad.Scope) == 0 { + ad.Scope = DEFAULT_SCOPE + } +} + +func (ad *AuthData) ToJson() string { + b, _ := json.Marshal(ad) + return string(b) +} + +func AuthDataFromJson(data io.Reader) *AuthData { + var ad *AuthData + json.NewDecoder(data).Decode(&ad) + return ad +} + +func (ar *AuthorizeRequest) ToJson() string { + b, _ := json.Marshal(ar) + return string(b) +} + +func AuthorizeRequestFromJson(data io.Reader) *AuthorizeRequest { + var ar *AuthorizeRequest + json.NewDecoder(data).Decode(&ar) + return ar +} + +func (ad *AuthData) IsExpired() bool { + return GetMillis() > ad.CreateAt+int64(ad.ExpiresIn*1000) +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/builtin.go b/vendor/github.com/mattermost/mattermost-server/model/builtin.go new file mode 100644 index 00000000..5dd00a96 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/builtin.go @@ -0,0 +1,9 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +func NewBool(b bool) *bool { return &b } +func NewInt(n int) *int { return &n } +func NewInt64(n int64) *int64 { return &n } +func NewString(s string) *string { return &s } diff --git a/vendor/github.com/mattermost/mattermost-server/model/bundle_info.go b/vendor/github.com/mattermost/mattermost-server/model/bundle_info.go new file mode 100644 index 00000000..6965159c --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/bundle_info.go @@ -0,0 +1,23 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +type BundleInfo struct { + Path string + + Manifest *Manifest + ManifestPath string + ManifestError error +} + +// Returns bundle info for the given path. The return value is never nil. +func BundleInfoForPath(path string) *BundleInfo { + m, mpath, err := FindManifest(path) + return &BundleInfo{ + Path: path, + Manifest: m, + ManifestPath: mpath, + ManifestError: err, + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel.go b/vendor/github.com/mattermost/mattermost-server/model/channel.go new file mode 100644 index 00000000..ce812be3 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/channel.go @@ -0,0 +1,208 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. 
+// See License.txt for license information. + +package model + +import ( + "crypto/sha1" + "encoding/hex" + "encoding/json" + "io" + "net/http" + "sort" + "strings" + "unicode/utf8" +) + +const ( + CHANNEL_OPEN = "O" + CHANNEL_PRIVATE = "P" + CHANNEL_DIRECT = "D" + CHANNEL_GROUP = "G" + CHANNEL_GROUP_MAX_USERS = 8 + CHANNEL_GROUP_MIN_USERS = 3 + DEFAULT_CHANNEL = "town-square" + CHANNEL_DISPLAY_NAME_MAX_RUNES = 64 + CHANNEL_NAME_MIN_LENGTH = 2 + CHANNEL_NAME_MAX_LENGTH = 64 + CHANNEL_NAME_UI_MAX_LENGTH = 22 + CHANNEL_HEADER_MAX_RUNES = 1024 + CHANNEL_PURPOSE_MAX_RUNES = 250 + CHANNEL_CACHE_SIZE = 25000 +) + +type Channel struct { + Id string `json:"id"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + TeamId string `json:"team_id"` + Type string `json:"type"` + DisplayName string `json:"display_name"` + Name string `json:"name"` + Header string `json:"header"` + Purpose string `json:"purpose"` + LastPostAt int64 `json:"last_post_at"` + TotalMsgCount int64 `json:"total_msg_count"` + ExtraUpdateAt int64 `json:"extra_update_at"` + CreatorId string `json:"creator_id"` +} + +type ChannelPatch struct { + DisplayName *string `json:"display_name"` + Name *string `json:"name"` + Header *string `json:"header"` + Purpose *string `json:"purpose"` +} + +func (o *Channel) DeepCopy() *Channel { + copy := *o + return &copy +} + +func (o *Channel) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func (o *ChannelPatch) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func ChannelFromJson(data io.Reader) *Channel { + var o *Channel + json.NewDecoder(data).Decode(&o) + return o +} + +func ChannelPatchFromJson(data io.Reader) *ChannelPatch { + var o *ChannelPatch + json.NewDecoder(data).Decode(&o) + return o +} + +func (o *Channel) Etag() string { + return Etag(o.Id, o.UpdateAt) +} + +func (o *Channel) StatsEtag() string { + return Etag(o.Id, o.ExtraUpdateAt) +} + +func (o *Channel) IsValid() *AppError { + + if len(o.Id) != 26 { + return NewAppError("Channel.IsValid", "model.channel.is_valid.id.app_error", nil, "", http.StatusBadRequest) + } + + if o.CreateAt == 0 { + return NewAppError("Channel.IsValid", "model.channel.is_valid.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if o.UpdateAt == 0 { + return NewAppError("Channel.IsValid", "model.channel.is_valid.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if utf8.RuneCountInString(o.DisplayName) > CHANNEL_DISPLAY_NAME_MAX_RUNES { + return NewAppError("Channel.IsValid", "model.channel.is_valid.display_name.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if !IsValidChannelIdentifier(o.Name) { + return NewAppError("Channel.IsValid", "model.channel.is_valid.2_or_more.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if !(o.Type == CHANNEL_OPEN || o.Type == CHANNEL_PRIVATE || o.Type == CHANNEL_DIRECT || o.Type == CHANNEL_GROUP) { + return NewAppError("Channel.IsValid", "model.channel.is_valid.type.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if utf8.RuneCountInString(o.Header) > CHANNEL_HEADER_MAX_RUNES { + return NewAppError("Channel.IsValid", "model.channel.is_valid.header.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if utf8.RuneCountInString(o.Purpose) > CHANNEL_PURPOSE_MAX_RUNES { + return NewAppError("Channel.IsValid", "model.channel.is_valid.purpose.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if len(o.CreatorId) > 26 { + return
NewAppError("Channel.IsValid", "model.channel.is_valid.creator_id.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (o *Channel) PreSave() { + if o.Id == "" { + o.Id = NewId() + } + + o.CreateAt = GetMillis() + o.UpdateAt = o.CreateAt + o.ExtraUpdateAt = o.CreateAt +} + +func (o *Channel) PreUpdate() { + o.UpdateAt = GetMillis() +} + +func (o *Channel) ExtraUpdated() { + o.ExtraUpdateAt = GetMillis() +} + +func (o *Channel) IsGroupOrDirect() bool { + return o.Type == CHANNEL_DIRECT || o.Type == CHANNEL_GROUP +} + +func (o *Channel) Patch(patch *ChannelPatch) { + if patch.DisplayName != nil { + o.DisplayName = *patch.DisplayName + } + + if patch.Name != nil { + o.Name = *patch.Name + } + + if patch.Header != nil { + o.Header = *patch.Header + } + + if patch.Purpose != nil { + o.Purpose = *patch.Purpose + } +} + +func GetDMNameFromIds(userId1, userId2 string) string { + if userId1 > userId2 { + return userId2 + "__" + userId1 + } else { + return userId1 + "__" + userId2 + } +} + +func GetGroupDisplayNameFromUsers(users []*User, truncate bool) string { + usernames := make([]string, len(users)) + for index, user := range users { + usernames[index] = user.Username + } + + sort.Strings(usernames) + + name := strings.Join(usernames, ", ") + + if truncate && len(name) > CHANNEL_NAME_MAX_LENGTH { + name = name[:CHANNEL_NAME_MAX_LENGTH] + } + + return name +} + +func GetGroupNameFromUserIds(userIds []string) string { + sort.Strings(userIds) + + h := sha1.New() + for _, id := range userIds { + io.WriteString(h, id) + } + + return hex.EncodeToString(h.Sum(nil)) +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_count.go b/vendor/github.com/mattermost/mattermost-server/model/channel_count.go new file mode 100644 index 00000000..8c6d8dd0 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/channel_count.go @@ -0,0 +1,54 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "crypto/md5" + "encoding/json" + "fmt" + "io" + "sort" + "strconv" +) + +type ChannelCounts struct { + Counts map[string]int64 `json:"counts"` + UpdateTimes map[string]int64 `json:"update_times"` +} + +func (o *ChannelCounts) Etag() string { + + ids := []string{} + for id := range o.Counts { + ids = append(ids, id) + } + sort.Strings(ids) + + str := "" + for _, id := range ids { + str += id + strconv.FormatInt(o.Counts[id], 10) + } + + md5Counts := fmt.Sprintf("%x", md5.Sum([]byte(str))) + + var update int64 = 0 + for _, u := range o.UpdateTimes { + if u > update { + update = u + } + } + + return Etag(md5Counts, update) +} + +func (o *ChannelCounts) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func ChannelCountsFromJson(data io.Reader) *ChannelCounts { + var o *ChannelCounts + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_data.go b/vendor/github.com/mattermost/mattermost-server/model/channel_data.go new file mode 100644 index 00000000..aae0a149 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/channel_data.go @@ -0,0 +1,34 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package model + +import ( + "encoding/json" + "io" +) + +type ChannelData struct { + Channel *Channel `json:"channel"` + Member *ChannelMember `json:"member"` +} + +func (o *ChannelData) Etag() string { + var mt int64 = 0 + if o.Member != nil { + mt = o.Member.LastUpdateAt + } + + return Etag(o.Channel.Id, o.Channel.UpdateAt, o.Channel.LastPostAt, mt) +} + +func (o *ChannelData) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func ChannelDataFromJson(data io.Reader) *ChannelData { + var o *ChannelData + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_list.go b/vendor/github.com/mattermost/mattermost-server/model/channel_list.go new file mode 100644 index 00000000..1b3bda46 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/channel_list.go @@ -0,0 +1,53 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type ChannelList []*Channel + +func (o *ChannelList) ToJson() string { + if b, err := json.Marshal(o); err != nil { + return "[]" + } else { + return string(b) + } +} + +func (o *ChannelList) Etag() string { + + id := "0" + var t int64 = 0 + var delta int64 = 0 + + for _, v := range *o { + if v.LastPostAt > t { + t = v.LastPostAt + id = v.Id + } + + if v.UpdateAt > t { + t = v.UpdateAt + id = v.Id + } + + } + + return Etag(id, t, delta, len(*o)) +} + +func ChannelListFromJson(data io.Reader) *ChannelList { + var o *ChannelList + json.NewDecoder(data).Decode(&o) + return o +} + +func ChannelSliceFromJson(data io.Reader) []*Channel { + var o []*Channel + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_member.go b/vendor/github.com/mattermost/mattermost-server/model/channel_member.go new file mode 100644 index 00000000..e9895aea --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/channel_member.go @@ -0,0 +1,148 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package model + +import ( + "encoding/json" + "io" + "net/http" + "strings" +) + +const ( + CHANNEL_NOTIFY_DEFAULT = "default" + CHANNEL_NOTIFY_ALL = "all" + CHANNEL_NOTIFY_MENTION = "mention" + CHANNEL_NOTIFY_NONE = "none" + CHANNEL_MARK_UNREAD_ALL = "all" + CHANNEL_MARK_UNREAD_MENTION = "mention" +) + +type ChannelUnread struct { + TeamId string `json:"team_id"` + ChannelId string `json:"channel_id"` + MsgCount int64 `json:"msg_count"` + MentionCount int64 `json:"mention_count"` + NotifyProps StringMap `json:"-"` +} + +type ChannelMember struct { + ChannelId string `json:"channel_id"` + UserId string `json:"user_id"` + Roles string `json:"roles"` + LastViewedAt int64 `json:"last_viewed_at"` + MsgCount int64 `json:"msg_count"` + MentionCount int64 `json:"mention_count"` + NotifyProps StringMap `json:"notify_props"` + LastUpdateAt int64 `json:"last_update_at"` +} + +type ChannelMembers []ChannelMember + +func (o *ChannelMembers) ToJson() string { + if b, err := json.Marshal(o); err != nil { + return "[]" + } else { + return string(b) + } +} + +func (o *ChannelUnread) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func ChannelMembersFromJson(data io.Reader) *ChannelMembers { + var o *ChannelMembers + json.NewDecoder(data).Decode(&o) + return o +} + +func ChannelUnreadFromJson(data io.Reader) *ChannelUnread { + var o *ChannelUnread + json.NewDecoder(data).Decode(&o) + return o +} + +func (o *ChannelMember) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func ChannelMemberFromJson(data io.Reader) *ChannelMember { + var o *ChannelMember + json.NewDecoder(data).Decode(&o) + return o +} + +func (o *ChannelMember) IsValid() *AppError { + + if len(o.ChannelId) != 26 { + return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.UserId) != 26 { + return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) + } + + notifyLevel := o.NotifyProps[DESKTOP_NOTIFY_PROP] + if len(notifyLevel) > 20 || !IsChannelNotifyLevelValid(notifyLevel) { + return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.notify_level.app_error", nil, "notify_level="+notifyLevel, http.StatusBadRequest) + } + + markUnreadLevel := o.NotifyProps[MARK_UNREAD_NOTIFY_PROP] + if len(markUnreadLevel) > 20 || !IsChannelMarkUnreadLevelValid(markUnreadLevel) { + return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.unread_level.app_error", nil, "mark_unread_level="+markUnreadLevel, http.StatusBadRequest) + } + + if pushLevel, ok := o.NotifyProps[PUSH_NOTIFY_PROP]; ok { + if len(pushLevel) > 20 || !IsChannelNotifyLevelValid(pushLevel) { + return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.push_level.app_error", nil, "push_notification_level="+pushLevel, http.StatusBadRequest) + } + } + + if sendEmail, ok := o.NotifyProps[EMAIL_NOTIFY_PROP]; ok { + if len(sendEmail) > 20 || !IsSendEmailValid(sendEmail) { + return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.email_value.app_error", nil, "push_notification_level="+sendEmail, http.StatusBadRequest) + } + } + + return nil +} + +func (o *ChannelMember) PreSave() { + o.LastUpdateAt = GetMillis() +} + +func (o *ChannelMember) PreUpdate() { + o.LastUpdateAt = GetMillis() +} + +func (o *ChannelMember) GetRoles() []string { + return strings.Fields(o.Roles) +} + +func IsChannelNotifyLevelValid(notifyLevel string) bool { + 
return notifyLevel == CHANNEL_NOTIFY_DEFAULT || + notifyLevel == CHANNEL_NOTIFY_ALL || + notifyLevel == CHANNEL_NOTIFY_MENTION || + notifyLevel == CHANNEL_NOTIFY_NONE +} + +func IsChannelMarkUnreadLevelValid(markUnreadLevel string) bool { + return markUnreadLevel == CHANNEL_MARK_UNREAD_ALL || markUnreadLevel == CHANNEL_MARK_UNREAD_MENTION +} + +func IsSendEmailValid(sendEmail string) bool { + return sendEmail == CHANNEL_NOTIFY_DEFAULT || sendEmail == "true" || sendEmail == "false" +} + +func GetDefaultChannelNotifyProps() StringMap { + return StringMap{ + DESKTOP_NOTIFY_PROP: CHANNEL_NOTIFY_DEFAULT, + MARK_UNREAD_NOTIFY_PROP: CHANNEL_MARK_UNREAD_ALL, + PUSH_NOTIFY_PROP: CHANNEL_NOTIFY_DEFAULT, + EMAIL_NOTIFY_PROP: CHANNEL_NOTIFY_DEFAULT, + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_member_history.go b/vendor/github.com/mattermost/mattermost-server/model/channel_member_history.go new file mode 100644 index 00000000..47c59d54 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/channel_member_history.go @@ -0,0 +1,15 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +type ChannelMemberHistory struct { + ChannelId string + UserId string + JoinTime int64 + LeaveTime *int64 + + // these two fields are never set in the database - when we SELECT, we join on Users to get them + UserEmail string `db:"Email"` + Username string +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_search.go b/vendor/github.com/mattermost/mattermost-server/model/channel_search.go new file mode 100644 index 00000000..593cf669 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/channel_search.go @@ -0,0 +1,26 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type ChannelSearch struct { + Term string `json:"term"` +} + +// ToJson convert a Channel to a json string +func (c *ChannelSearch) ToJson() string { + b, _ := json.Marshal(c) + return string(b) +} + +// ChannelSearchFromJson will decode the input and return a Channel +func ChannelSearchFromJson(data io.Reader) *ChannelSearch { + var cs *ChannelSearch + json.NewDecoder(data).Decode(&cs) + return cs +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_stats.go b/vendor/github.com/mattermost/mattermost-server/model/channel_stats.go new file mode 100644 index 00000000..21af920f --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/channel_stats.go @@ -0,0 +1,25 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type ChannelStats struct { + ChannelId string `json:"channel_id"` + MemberCount int64 `json:"member_count"` +} + +func (o *ChannelStats) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func ChannelStatsFromJson(data io.Reader) *ChannelStats { + var o *ChannelStats + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/channel_view.go b/vendor/github.com/mattermost/mattermost-server/model/channel_view.go new file mode 100644 index 00000000..650d14ce --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/channel_view.go @@ -0,0 +1,41 @@ +// Copyright (c) 2016-present Mattermost, Inc. 
All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type ChannelView struct { + ChannelId string `json:"channel_id"` + PrevChannelId string `json:"prev_channel_id"` +} + +func (o *ChannelView) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func ChannelViewFromJson(data io.Reader) *ChannelView { + var o *ChannelView + json.NewDecoder(data).Decode(&o) + return o +} + +type ChannelViewResponse struct { + Status string `json:"status"` + LastViewedAtTimes map[string]int64 `json:"last_viewed_at_times"` +} + +func (o *ChannelViewResponse) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func ChannelViewResponseFromJson(data io.Reader) *ChannelViewResponse { + var o *ChannelViewResponse + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/client.go b/vendor/github.com/mattermost/mattermost-server/model/client.go new file mode 100644 index 00000000..ef890b59 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/client.go @@ -0,0 +1,2379 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + l4g "github.com/alecthomas/log4go" +) + +var UsedApiV3 *int32 = new(int32) + +const ( + HEADER_REQUEST_ID = "X-Request-ID" + HEADER_VERSION_ID = "X-Version-ID" + HEADER_CLUSTER_ID = "X-Cluster-ID" + HEADER_ETAG_SERVER = "ETag" + HEADER_ETAG_CLIENT = "If-None-Match" + HEADER_FORWARDED = "X-Forwarded-For" + HEADER_REAL_IP = "X-Real-IP" + HEADER_FORWARDED_PROTO = "X-Forwarded-Proto" + HEADER_TOKEN = "token" + HEADER_BEARER = "BEARER" + HEADER_AUTH = "Authorization" + HEADER_REQUESTED_WITH = "X-Requested-With" + HEADER_REQUESTED_WITH_XML = "XMLHttpRequest" + STATUS = "status" + STATUS_OK = "OK" + STATUS_FAIL = "FAIL" + STATUS_REMOVE = "REMOVE" + + CLIENT_DIR = "client" + + API_URL_SUFFIX_V1 = "/api/v1" + API_URL_SUFFIX_V3 = "/api/v3" + API_URL_SUFFIX_V4 = "/api/v4" + API_URL_SUFFIX = API_URL_SUFFIX_V4 +) + +type Result struct { + RequestId string + Etag string + Data interface{} +} + +type ResponseMetadata struct { + StatusCode int + Error *AppError + RequestId string + Etag string +} + +type Client struct { + Url string // The location of the server like "http://localhost:8065" + ApiUrl string // The api location of the server like "http://localhost:8065/api/v3" + HttpClient *http.Client // The http client + AuthToken string + AuthType string + TeamId string + RequestId string + Etag string + ServerVersion string +} + +// NewClient constructs a new client with convenience methods for talking to +// the server.
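// A minimal usage sketch (editorial illustration, not part of the vendored file;
// the server URL, credentials and teamId value are placeholders, and Login and
// SetTeamId are defined further down in this file):
//
//      client := NewClient("http://localhost:8065")
//      if _, err := client.Login("someone@example.com", "password"); err != nil {
//              // handle the *AppError
//      }
//      client.SetTeamId(teamId) // needed before team-scoped routes such as GetTeamRoute()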
+func NewClient(url string) *Client { + return &Client{url, url + API_URL_SUFFIX_V3, &http.Client{}, "", "", "", "", "", ""} +} + +func closeBody(r *http.Response) { + if r.Body != nil { + ioutil.ReadAll(r.Body) + r.Body.Close() + } +} + +func (c *Client) SetOAuthToken(token string) { + c.AuthToken = token + c.AuthType = HEADER_TOKEN +} + +func (c *Client) ClearOAuthToken() { + c.AuthToken = "" + c.AuthType = HEADER_BEARER +} + +func (c *Client) SetTeamId(teamId string) { + c.TeamId = teamId +} + +func (c *Client) GetTeamId() string { + if len(c.TeamId) == 0 { + println(`You are trying to use a route that requires a team_id, + but you have not called SetTeamId() in client.go`) + } + + return c.TeamId +} + +func (c *Client) ClearTeamId() { + c.TeamId = "" +} + +func (c *Client) GetTeamRoute() string { + return fmt.Sprintf("/teams/%v", c.GetTeamId()) +} + +func (c *Client) GetChannelRoute(channelId string) string { + return fmt.Sprintf("/teams/%v/channels/%v", c.GetTeamId(), channelId) +} + +func (c *Client) GetUserRequiredRoute(userId string) string { + return fmt.Sprintf("/users/%v", userId) +} + +func (c *Client) GetChannelNameRoute(channelName string) string { + return fmt.Sprintf("/teams/%v/channels/name/%v", c.GetTeamId(), channelName) +} + +func (c *Client) GetEmojiRoute() string { + return "/emoji" +} + +func (c *Client) GetGeneralRoute() string { + return "/general" +} + +func (c *Client) GetFileRoute(fileId string) string { + return fmt.Sprintf("/files/%v", fileId) +} + +func (c *Client) DoPost(url, data, contentType string) (*http.Response, *AppError) { + rq, _ := http.NewRequest("POST", c.Url+url, strings.NewReader(data)) + rq.Header.Set("Content-Type", contentType) + rq.Close = true + + if rp, err := c.HttpClient.Do(rq); err != nil { + return nil, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0) + } else if rp.StatusCode >= 300 { + defer closeBody(rp) + return nil, AppErrorFromJson(rp.Body) + } else { + return rp, nil + } +} + +func (c *Client) DoApiPost(url string, data string) (*http.Response, *AppError) { + rq, _ := http.NewRequest("POST", c.ApiUrl+url, strings.NewReader(data)) + rq.Close = true + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil { + return nil, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0) + } else if rp.StatusCode >= 300 { + defer closeBody(rp) + return nil, AppErrorFromJson(rp.Body) + } else { + return rp, nil + } +} + +func (c *Client) DoApiGet(url string, data string, etag string) (*http.Response, *AppError) { + rq, _ := http.NewRequest("GET", c.ApiUrl+url, strings.NewReader(data)) + rq.Close = true + + if len(etag) > 0 { + rq.Header.Set(HEADER_ETAG_CLIENT, etag) + } + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil { + return nil, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0) + } else if rp.StatusCode == 304 { + return rp, nil + } else if rp.StatusCode >= 300 { + defer closeBody(rp) + return rp, AppErrorFromJson(rp.Body) + } else { + return rp, nil + } +} + +func getCookie(name string, resp *http.Response) *http.Cookie { + for _, cookie := range resp.Cookies() { + if cookie.Name == name { + return cookie + } + } + + return nil +} + +// Must is a convenience function used for testing. 
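// For example, a test might chain it with Login (illustrative sketch only, not
// part of the vendored file; it assumes UserFromJson, defined elsewhere in this
// package, decodes the body into a *User, and the credentials are placeholders):
//
//      user := client.Must(client.Login("someone@example.com", "password")).Data.(*User)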
+func (c *Client) Must(result *Result, err *AppError) *Result { + if err != nil { + l4g.Close() + time.Sleep(time.Second) + panic(err) + } + + return result +} + +// MustGeneric is a convenience function used for testing. +func (c *Client) MustGeneric(result interface{}, err *AppError) interface{} { + if err != nil { + l4g.Close() + time.Sleep(time.Second) + panic(err) + } + + return result +} + +// CheckStatusOK is a convenience function for checking the return of Web Service +// calls that return a map of status=OK. +func (c *Client) CheckStatusOK(r *http.Response) bool { + m := MapFromJson(r.Body) + defer closeBody(r) + + if m != nil && m[STATUS] == STATUS_OK { + return true + } + + return false +} + +func (c *Client) fillInExtraProperties(r *http.Response) { + c.RequestId = r.Header.Get(HEADER_REQUEST_ID) + c.Etag = r.Header.Get(HEADER_ETAG_SERVER) + c.ServerVersion = r.Header.Get(HEADER_VERSION_ID) +} + +func (c *Client) clearExtraProperties() { + c.RequestId = "" + c.Etag = "" + c.ServerVersion = "" +} + +// General Routes Section + +// GetClientProperties returns properties needed by the client to show/hide +// certain features. It returns a map of strings. +func (c *Client) GetClientProperties() (map[string]string, *AppError) { + c.clearExtraProperties() + if r, err := c.DoApiGet(c.GetGeneralRoute()+"/client_props", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + c.fillInExtraProperties(r) + return MapFromJson(r.Body), nil + } +} + +// LogClient is a convenience Web Service call so clients can log messages into +// the server-side logs. For example, we typically log javascript error messages +// into the server-side logs. It returns true if the logging was successful. +func (c *Client) LogClient(message string) (bool, *AppError) { + c.clearExtraProperties() + m := make(map[string]string) + m["level"] = "ERROR" + m["message"] = message + + if r, err := c.DoApiPost(c.GetGeneralRoute()+"/log_client", MapToJson(m)); err != nil { + return false, err + } else { + defer closeBody(r) + c.fillInExtraProperties(r) + return c.CheckStatusOK(r), nil + } +} + +// GetPing returns a map of strings with server time, server version, and node Id. +// Systems that want to check on health status of the server should check the +// url /api/v3/ping for a 200 status response. +func (c *Client) GetPing() (map[string]string, *AppError) { + c.clearExtraProperties() + if r, err := c.DoApiGet(c.GetGeneralRoute()+"/ping", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + c.fillInExtraProperties(r) + return MapFromJson(r.Body), nil + } +} + +// Team Routes Section + +// CreateTeam creates a team based on the provided Team struct. On success it returns +// the Team struct with the Id, CreateAt and other server-decided fields populated. +func (c *Client) CreateTeam(team *Team) (*Result, *AppError) { + if r, err := c.DoApiPost("/teams/create", team.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), TeamFromJson(r.Body)}, nil + } +} + +// GetAllTeams returns a map of all teams using team ids as the key.
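// Sketch of typical use (editorial illustration, not part of the vendored file;
// it assumes TeamMapFromJson, defined elsewhere in this package, yields a
// map[string]*Team in Result.Data):
//
//      if result, err := client.GetAllTeams(); err == nil {
//              teams := result.Data.(map[string]*Team)
//              for id := range teams {
//                      _ = id // team id; teams[id] is the corresponding *Team
//              }
//      }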
+func (c *Client) GetAllTeams() (*Result, *AppError) { + if r, err := c.DoApiGet("/teams/all", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), TeamMapFromJson(r.Body)}, nil + } +} + +// GetAllTeamListings returns a map of all teams that are available to join +// using team ids as the key. Must be authenticated. +func (c *Client) GetAllTeamListings() (*Result, *AppError) { + if r, err := c.DoApiGet("/teams/all_team_listings", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), TeamMapFromJson(r.Body)}, nil + } +} + +// FindTeamByName returns the strings "true" or "false" depending on if a team +// with the provided name was found. +func (c *Client) FindTeamByName(name string) (*Result, *AppError) { + m := make(map[string]string) + m["name"] = name + if r, err := c.DoApiPost("/teams/find_team_by_name", MapToJson(m)); err != nil { + return nil, err + } else { + val := false + if body, _ := ioutil.ReadAll(r.Body); string(body) == "true" { + val = true + } + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), val}, nil + } +} + +// Adds a user directly to the team without sending an invite. +// The teamId and userId are required. You must be a valid member of the team and/or +// have the correct role to add new users to the team. Returns a map of user_id=userId +// if successful, otherwise returns an AppError. +func (c *Client) AddUserToTeam(teamId string, userId string) (*Result, *AppError) { + if len(teamId) == 0 { + teamId = c.GetTeamId() + } + + data := make(map[string]string) + data["user_id"] = userId + if r, err := c.DoApiPost(fmt.Sprintf("/teams/%v", teamId)+"/add_user_to_team", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +// AddUserToTeamFromInvite adds a user to a team based off data provided in an invite link. +// Either hash and dataToHash are required or inviteId is required. +func (c *Client) AddUserToTeamFromInvite(hash, dataToHash, inviteId string) (*Result, *AppError) { + data := make(map[string]string) + data["hash"] = hash + data["data"] = dataToHash + data["invite_id"] = inviteId + if r, err := c.DoApiPost("/teams/add_user_to_team_from_invite", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), TeamFromJson(r.Body)}, nil + } +} + +// Removes a user directly from the team. +// The teamId and userId are required. You must be a valid member of the team and/or +// have the correct role to remove a user from the team. Returns a map of user_id=userId +// if successful, otherwise returns an AppError. 
+func (c *Client) RemoveUserFromTeam(teamId string, userId string) (*Result, *AppError) { + if len(teamId) == 0 { + teamId = c.GetTeamId() + } + + data := make(map[string]string) + data["user_id"] = userId + if r, err := c.DoApiPost(fmt.Sprintf("/teams/%v", teamId)+"/remove_user_from_team", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) InviteMembers(invites *Invites) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetTeamRoute()+"/invite_members", invites.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), InvitesFromJson(r.Body)}, nil + } +} + +// UpdateTeam updates a team based on the changes in the provided team struct. On success +// it returns a sanitized version of the updated team. Must be authenticated as a team admin +// for that team or a system admin. +func (c *Client) UpdateTeam(team *Team) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetTeamRoute()+"/update", team.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), TeamFromJson(r.Body)}, nil + } +} + +// User Routes Section + +// CreateUser creates a user in the system based on the provided user struct. +func (c *Client) CreateUser(user *User, hash string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/create", user.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +// CreateUserWithInvite creates a user based on the provided user struct. Either the hash and +// data strings or the inviteId is required from the invite. +func (c *Client) CreateUserWithInvite(user *User, hash string, data string, inviteId string) (*Result, *AppError) { + + url := "/users/create?d=" + url.QueryEscape(data) + "&h=" + url.QueryEscape(hash) + "&iid=" + url.QueryEscape(inviteId) + + if r, err := c.DoApiPost(url, user.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +func (c *Client) CreateUserFromSignup(user *User, data string, hash string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/create?d="+url.QueryEscape(data)+"&h="+hash, user.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +// GetUser returns a user based on a provided user id string. Must be authenticated. +func (c *Client) GetUser(id string, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet("/users/"+id+"/get", "", etag); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +// getByUsername returns a user based on a provided username string. Must be authenticated. 
+func (c *Client) GetByUsername(username string, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet(fmt.Sprintf("/users/name/%v", username), "", etag); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +// getByEmail returns a user based on a provided email address string. Must be authenticated. +func (c *Client) GetByEmail(email string, etag string) (*User, *ResponseMetadata) { + if r, err := c.DoApiGet(fmt.Sprintf("/users/email/%v", email), "", etag); err != nil { + return nil, &ResponseMetadata{StatusCode: r.StatusCode, Error: err} + } else { + defer closeBody(r) + return UserFromJson(r.Body), + &ResponseMetadata{ + StatusCode: r.StatusCode, + RequestId: r.Header.Get(HEADER_REQUEST_ID), + Etag: r.Header.Get(HEADER_ETAG_SERVER), + } + } +} + +// GetMe returns the current user. +func (c *Client) GetMe(etag string) (*Result, *AppError) { + if r, err := c.DoApiGet("/users/me", "", etag); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +// GetProfiles returns a map of users using user id as the key. Must be authenticated. +func (c *Client) GetProfiles(offset int, limit int, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet(fmt.Sprintf("/users/%v/%v", offset, limit), "", etag); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserMapFromJson(r.Body)}, nil + } +} + +// GetProfilesInTeam returns a map of users for a team using user id as the key. Must +// be authenticated. +func (c *Client) GetProfilesInTeam(teamId string, offset int, limit int, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet(fmt.Sprintf("/teams/%v/users/%v/%v", teamId, offset, limit), "", etag); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserMapFromJson(r.Body)}, nil + } +} + +// GetProfilesInChannel returns a map of users for a channel using user id as the key. Must +// be authenticated. +func (c *Client) GetProfilesInChannel(channelId string, offset int, limit int, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet(fmt.Sprintf(c.GetChannelRoute(channelId)+"/users/%v/%v", offset, limit), "", etag); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserMapFromJson(r.Body)}, nil + } +} + +// GetProfilesNotInChannel returns a map of users not in a channel but on the team using user id as the key. Must +// be authenticated. +func (c *Client) GetProfilesNotInChannel(channelId string, offset int, limit int, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet(fmt.Sprintf(c.GetChannelRoute(channelId)+"/users/not_in_channel/%v/%v", offset, limit), "", etag); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserMapFromJson(r.Body)}, nil + } +} + +// GetProfilesByIds returns a map of users based on the user ids provided. Must +// be authenticated.
+func (c *Client) GetProfilesByIds(userIds []string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/ids", ArrayToJson(userIds)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserMapFromJson(r.Body)}, nil + } +} + +// SearchUsers returns a list of users that have a username matching or similar to the search term. Must +// be authenticated. +func (c *Client) SearchUsers(params UserSearch) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/search", params.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserListFromJson(r.Body)}, nil + } +} + +// AutocompleteUsersInChannel returns two lists for autocompletion of users in a channel. The first list "in_channel", +// specifies users in the channel. The second list "out_of_channel" specifies users outside of the +// channel. Term, the string to search against, is required, channel id is also required. Must be authenticated. +func (c *Client) AutocompleteUsersInChannel(term string, channelId string) (*Result, *AppError) { + url := fmt.Sprintf("%s/users/autocomplete?term=%s", c.GetChannelRoute(channelId), url.QueryEscape(term)) + if r, err := c.DoApiGet(url, "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserAutocompleteInChannelFromJson(r.Body)}, nil + } +} + +// AutocompleteUsersInTeam returns a list for autocompletion of users in a team. The list "in_team" specifies +// the users in the team that match the provided term, matching against username, full name and +// nickname. Must be authenticated. +func (c *Client) AutocompleteUsersInTeam(term string) (*Result, *AppError) { + url := fmt.Sprintf("%s/users/autocomplete?term=%s", c.GetTeamRoute(), url.QueryEscape(term)) + if r, err := c.DoApiGet(url, "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserAutocompleteInTeamFromJson(r.Body)}, nil + } +} + +// AutocompleteUsers returns a list for autocompletion of users on the system that match the provided term, +// matching against username, full name and nickname. Must be authenticated. +func (c *Client) AutocompleteUsers(term string) (*Result, *AppError) { + url := fmt.Sprintf("/users/autocomplete?term=%s", url.QueryEscape(term)) + if r, err := c.DoApiGet(url, "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserListFromJson(r.Body)}, nil + } +} + +// LoginById authenticates a user by user id and password. +func (c *Client) LoginById(id string, password string) (*Result, *AppError) { + m := make(map[string]string) + m["id"] = id + m["password"] = password + return c.login(m) +} + +// Login authenticates a user by login id, which can be username, email or some sort +// of SSO identifier based on configuration, and a password. +func (c *Client) Login(loginId string, password string) (*Result, *AppError) { + m := make(map[string]string) + m["login_id"] = loginId + m["password"] = password + return c.login(m) +} + +// LoginByLdap authenticates a user by LDAP id and password. 
+func (c *Client) LoginByLdap(loginId string, password string) (*Result, *AppError) { + m := make(map[string]string) + m["login_id"] = loginId + m["password"] = password + m["ldap_only"] = "true" + return c.login(m) +} + +// LoginWithDevice authenticates a user by login id (username, email or some sort +// of SSO identifier based on configuration), password and attaches a device id to +// the session. +func (c *Client) LoginWithDevice(loginId string, password string, deviceId string) (*Result, *AppError) { + m := make(map[string]string) + m["login_id"] = loginId + m["password"] = password + m["device_id"] = deviceId + return c.login(m) +} + +func (c *Client) login(m map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/login", MapToJson(m)); err != nil { + return nil, err + } else { + c.AuthToken = r.Header.Get(HEADER_TOKEN) + c.AuthType = HEADER_BEARER + sessionToken := getCookie(SESSION_COOKIE_TOKEN, r) + + if c.AuthToken != sessionToken.Value { + NewAppError("/users/login", "model.client.login.app_error", nil, "", 0) + } + + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +// Logout terminates the current user's session. +func (c *Client) Logout() (*Result, *AppError) { + if r, err := c.DoApiPost("/users/logout", ""); err != nil { + return nil, err + } else { + c.AuthToken = "" + c.AuthType = HEADER_BEARER + c.TeamId = "" + + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +// CheckMfa returns a map with key "mfa_required" with the string value "true" or "false", +// indicating whether MFA is required to log the user in, based on a provided login id +// (username, email or some sort of SSO identifier based on configuration). +func (c *Client) CheckMfa(loginId string) (*Result, *AppError) { + m := make(map[string]string) + m["login_id"] = loginId + + if r, err := c.DoApiPost("/users/mfa", MapToJson(m)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +// GenerateMfaSecret returns a QR code image containing the secret, to be scanned +// by a multi-factor authentication mobile application. It also returns the secret +// for manual entry. Must be authenticated. +func (c *Client) GenerateMfaSecret() (*Result, *AppError) { + if r, err := c.DoApiGet("/users/generate_mfa_secret", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +// UpdateMfa activates multi-factor authentication for the current user if activate +// is true and a valid token is provided. If activate is false, then token is not +// required and multi-factor authentication is disabled for the current user.
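// Illustrative sketch (editorial example, not part of the vendored file; the
// token value is a placeholder read from an authenticator app):
//
//      if _, err := client.UpdateMfa(true, "123456"); err != nil {
//              // the token was rejected or MFA could not be activated
//      }
//      _, _ = client.UpdateMfa(false, "") // later, deactivation needs no token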
+func (c *Client) UpdateMfa(activate bool, token string) (*Result, *AppError) { + m := make(map[string]interface{}) + m["activate"] = activate + m["token"] = token + + if r, err := c.DoApiPost("/users/update_mfa", StringInterfaceToJson(m)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) AdminResetMfa(userId string) (*Result, *AppError) { + m := make(map[string]string) + m["user_id"] = userId + + if r, err := c.DoApiPost("/admin/reset_mfa", MapToJson(m)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) RevokeSession(sessionAltId string) (*Result, *AppError) { + m := make(map[string]string) + m["id"] = sessionAltId + + if r, err := c.DoApiPost("/users/revoke_session", MapToJson(m)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) GetSessions(id string) (*Result, *AppError) { + if r, err := c.DoApiGet("/users/"+id+"/sessions", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), SessionsFromJson(r.Body)}, nil + } +} + +func (c *Client) EmailToOAuth(m map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/claim/email_to_oauth", MapToJson(m)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) OAuthToEmail(m map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/claim/oauth_to_email", MapToJson(m)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) LDAPToEmail(m map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/claim/ldap_to_email", MapToJson(m)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) EmailToLDAP(m map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/claim/ldap_to_email", MapToJson(m)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) Command(channelId string, command string) (*Result, *AppError) { + args := &CommandArgs{ChannelId: channelId, Command: command} + if r, err := c.DoApiPost(c.GetTeamRoute()+"/commands/execute", args.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), CommandResponseFromJson(r.Body)}, nil + } +} + +func (c *Client) ListCommands() (*Result, *AppError) { + if r, err := c.DoApiGet(c.GetTeamRoute()+"/commands/list", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), CommandListFromJson(r.Body)}, nil + 
} +} + +func (c *Client) ListTeamCommands() (*Result, *AppError) { + if r, err := c.DoApiGet(c.GetTeamRoute()+"/commands/list_team_commands", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), CommandListFromJson(r.Body)}, nil + } +} + +func (c *Client) CreateCommand(cmd *Command) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetTeamRoute()+"/commands/create", cmd.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), CommandFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateCommand(cmd *Command) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetTeamRoute()+"/commands/update", cmd.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), CommandFromJson(r.Body)}, nil + } +} + +func (c *Client) RegenCommandToken(data map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetTeamRoute()+"/commands/regen_token", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), CommandFromJson(r.Body)}, nil + } +} + +func (c *Client) DeleteCommand(data map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetTeamRoute()+"/commands/delete", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) GetAudits(id string, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet("/users/"+id+"/audits", "", etag); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), AuditsFromJson(r.Body)}, nil + } +} + +func (c *Client) GetLogs() (*Result, *AppError) { + if r, err := c.DoApiGet("/admin/logs", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ArrayFromJson(r.Body)}, nil + } +} + +func (c *Client) GetClusterStatus() ([]*ClusterInfo, *AppError) { + if r, err := c.DoApiGet("/admin/cluster_status", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return ClusterInfosFromJson(r.Body), nil + } +} + +// GetRecentlyActiveUsers returns a map of users including lastActivityAt using user id as the key +func (c *Client) GetRecentlyActiveUsers(teamId string) (*Result, *AppError) { + if r, err := c.DoApiGet("/admin/recently_active_users/"+teamId, "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserMapFromJson(r.Body)}, nil + } +} + +func (c *Client) GetAllAudits() (*Result, *AppError) { + if r, err := c.DoApiGet("/admin/audits", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), AuditsFromJson(r.Body)}, nil + } +} + +func (c *Client) GetConfig() (*Result, *AppError) { + if r, err := c.DoApiGet("/admin/config", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + 
r.Header.Get(HEADER_ETAG_SERVER), ConfigFromJson(r.Body)}, nil + } +} + +// ReloadConfig will reload the config.json file from disk. Properties +// requiring a server restart will still need a server restart. You must +// have the system admin role to call this method. It will return status=OK +// if it's successfully reloaded the config file, otherwise check the returned error. +func (c *Client) ReloadConfig() (bool, *AppError) { + c.clearExtraProperties() + if r, err := c.DoApiGet("/admin/reload_config", "", ""); err != nil { + return false, err + } else { + c.fillInExtraProperties(r) + return c.CheckStatusOK(r), nil + } +} + +func (c *Client) InvalidateAllCaches() (bool, *AppError) { + c.clearExtraProperties() + if r, err := c.DoApiGet("/admin/invalidate_all_caches", "", ""); err != nil { + return false, err + } else { + c.fillInExtraProperties(r) + return c.CheckStatusOK(r), nil + } +} + +func (c *Client) SaveConfig(config *Config) (*Result, *AppError) { + if r, err := c.DoApiPost("/admin/save_config", config.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +// RecycleDatabaseConnection will attempt to recycle the database connections. +// You must have the system admin role to call this method. It will return status=OK +// if it's successfully recycled the connections, otherwise check the returned error. +func (c *Client) RecycleDatabaseConnection() (bool, *AppError) { + c.clearExtraProperties() + if r, err := c.DoApiGet("/admin/recycle_db_conn", "", ""); err != nil { + return false, err + } else { + c.fillInExtraProperties(r) + return c.CheckStatusOK(r), nil + } +} + +func (c *Client) TestEmail(config *Config) (*Result, *AppError) { + if r, err := c.DoApiPost("/admin/test_email", config.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +// TestLdap will run a connection test on the current LDAP settings. +// It will return the standard OK response if settings work. Otherwise +// it will return an appropriate error. 
+func (c *Client) TestLdap(config *Config) (*Result, *AppError) { + if r, err := c.DoApiPost("/admin/ldap_test", config.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) GetComplianceReports() (*Result, *AppError) { + if r, err := c.DoApiGet("/admin/compliance_reports", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), CompliancesFromJson(r.Body)}, nil + } +} + +func (c *Client) SaveComplianceReport(job *Compliance) (*Result, *AppError) { + if r, err := c.DoApiPost("/admin/save_compliance_report", job.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ComplianceFromJson(r.Body)}, nil + } +} + +func (c *Client) DownloadComplianceReport(id string) (*Result, *AppError) { + var rq *http.Request + rq, _ = http.NewRequest("GET", c.ApiUrl+"/admin/download_compliance_report/"+id, nil) + rq.Close = true + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, "BEARER "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil { + return nil, NewAppError("/admin/download_compliance_report", "model.client.connecting.app_error", nil, err.Error(), 0) + } else if rp.StatusCode >= 300 { + defer rp.Body.Close() + return nil, AppErrorFromJson(rp.Body) + } else { + defer closeBody(rp) + return &Result{rp.Header.Get(HEADER_REQUEST_ID), + rp.Header.Get(HEADER_ETAG_SERVER), rp.Body}, nil + } +} + +func (c *Client) GetTeamAnalytics(teamId, name string) (*Result, *AppError) { + if r, err := c.DoApiGet("/admin/analytics/"+teamId+"/"+name, "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), AnalyticsRowsFromJson(r.Body)}, nil + } +} + +func (c *Client) GetSystemAnalytics(name string) (*Result, *AppError) { + if r, err := c.DoApiGet("/admin/analytics/"+name, "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), AnalyticsRowsFromJson(r.Body)}, nil + } +} + +// Initiate immediate synchronization of LDAP users. +// The synchronization will be performed asynchronously and this function will +// always return OK unless you don't have permissions. +// You must be the system administrator to use this function. 
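// A short caller sketch (editorial illustration, not part of the vendored file);
// the call only succeeds for a system administrator:
//
//      if _, err := client.LdapSyncNow(); err != nil {
//              // most likely a permissions error
//      }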
+func (c *Client) LdapSyncNow() (*Result, *AppError) { + if r, err := c.DoApiPost("/admin/ldap_sync_now", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) CreateChannel(channel *Channel) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetTeamRoute()+"/channels/create", channel.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelFromJson(r.Body)}, nil + } +} + +func (c *Client) CreateDirectChannel(userId string) (*Result, *AppError) { + data := make(map[string]string) + data["user_id"] = userId + if r, err := c.DoApiPost(c.GetTeamRoute()+"/channels/create_direct", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelFromJson(r.Body)}, nil + } +} + +func (c *Client) CreateGroupChannel(userIds []string) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetTeamRoute()+"/channels/create_group", ArrayToJson(userIds)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateChannel(channel *Channel) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetTeamRoute()+"/channels/update", channel.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateChannelHeader(data map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetTeamRoute()+"/channels/update_header", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateChannelPurpose(data map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetTeamRoute()+"/channels/update_purpose", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateNotifyProps(data map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetTeamRoute()+"/channels/update_notify_props", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) GetMyChannelMembers() (*Result, *AppError) { + if r, err := c.DoApiGet(c.GetTeamRoute()+"/channels/members", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelMembersFromJson(r.Body)}, nil + } +} + +func (c *Client) GetChannel(id, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet(c.GetChannelRoute(id)+"/", "", etag); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelDataFromJson(r.Body)}, nil + } +} + +// GetMoreChannelsPage will return a page of open 
channels the user is not in based on +// the provided offset and limit. Must be authenticated. +func (c *Client) GetMoreChannelsPage(offset int, limit int) (*Result, *AppError) { + if r, err := c.DoApiGet(fmt.Sprintf(c.GetTeamRoute()+"/channels/more/%v/%v", offset, limit), "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelListFromJson(r.Body)}, nil + } +} + +// SearchMoreChannels will return a list of open channels the user is not in, that matches +// the search criteria provided. Must be authenticated. +func (c *Client) SearchMoreChannels(channelSearch ChannelSearch) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetTeamRoute()+"/channels/more/search", channelSearch.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelListFromJson(r.Body)}, nil + } +} + +// AutocompleteChannels will return a list of open channels that match the provided +// string. Must be authenticated. +func (c *Client) AutocompleteChannels(term string) (*Result, *AppError) { + url := fmt.Sprintf("%s/channels/autocomplete?term=%s", c.GetTeamRoute(), url.QueryEscape(term)) + if r, err := c.DoApiGet(url, "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelListFromJson(r.Body)}, nil + } +} + +func (c *Client) GetChannelCounts(etag string) (*Result, *AppError) { + if r, err := c.DoApiGet(c.GetTeamRoute()+"/channels/counts", "", etag); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelCountsFromJson(r.Body)}, nil + } +} + +func (c *Client) GetChannels(etag string) (*Result, *AppError) { + if r, err := c.DoApiGet(c.GetTeamRoute()+"/channels/", "", etag); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelListFromJson(r.Body)}, nil + } +} + +func (c *Client) GetChannelByName(channelName string) (*Result, *AppError) { + if r, err := c.DoApiGet(c.GetChannelNameRoute(channelName), "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelFromJson(r.Body)}, nil + } +} + +func (c *Client) JoinChannel(id string) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetChannelRoute(id)+"/join", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), nil}, nil + } +} + +func (c *Client) JoinChannelByName(name string) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetChannelNameRoute(name)+"/join", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), nil}, nil + } +} + +func (c *Client) LeaveChannel(id string) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetChannelRoute(id)+"/leave", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), nil}, nil + } +} + +func (c *Client) DeleteChannel(id string) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetChannelRoute(id)+"/delete", 
""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), nil}, nil + } +} + +func (c *Client) AddChannelMember(id, user_id string) (*Result, *AppError) { + data := make(map[string]string) + data["user_id"] = user_id + if r, err := c.DoApiPost(c.GetChannelRoute(id)+"/add", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), nil}, nil + } +} + +func (c *Client) RemoveChannelMember(id, user_id string) (*Result, *AppError) { + data := make(map[string]string) + data["user_id"] = user_id + if r, err := c.DoApiPost(c.GetChannelRoute(id)+"/remove", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), nil}, nil + } +} + +// ViewChannel performs all the actions related to viewing a channel. This includes marking +// the channel and the previous one as read, and marking the channel as being actively viewed. +// ChannelId is required but may be blank to indicate no channel is being viewed. +// PrevChannelId is optional, populate to indicate a channel switch occurred. +func (c *Client) ViewChannel(params ChannelView) (bool, *ResponseMetadata) { + if r, err := c.DoApiPost(c.GetTeamRoute()+"/channels/view", params.ToJson()); err != nil { + return false, &ResponseMetadata{StatusCode: r.StatusCode, Error: err} + } else { + return c.CheckStatusOK(r), + &ResponseMetadata{ + StatusCode: r.StatusCode, + RequestId: r.Header.Get(HEADER_REQUEST_ID), + Etag: r.Header.Get(HEADER_ETAG_SERVER), + } + } +} + +func (c *Client) GetChannelStats(id string, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet(c.GetChannelRoute(id)+"/stats", "", etag); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelStatsFromJson(r.Body)}, nil + } +} + +func (c *Client) GetChannelMember(channelId string, userId string) (*Result, *AppError) { + if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/members/"+userId, "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelMemberFromJson(r.Body)}, nil + } +} + +// GetChannelMembersByIds will return channel member objects as an array based on the +// channel id and a list of user ids provided. Must be authenticated. 
+func (c *Client) GetChannelMembersByIds(channelId string, userIds []string) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetChannelRoute(channelId)+"/members/ids", ArrayToJson(userIds)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelMembersFromJson(r.Body)}, nil + } +} + +func (c *Client) CreatePost(post *Post) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetChannelRoute(post.ChannelId)+"/posts/create", post.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdatePost(post *Post) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetChannelRoute(post.ChannelId)+"/posts/update", post.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostFromJson(r.Body)}, nil + } +} + +func (c *Client) GetPosts(channelId string, offset int, limit int, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+fmt.Sprintf("/posts/page/%v/%v", offset, limit), "", etag); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil + } +} + +func (c *Client) GetPostsSince(channelId string, time int64) (*Result, *AppError) { + if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+fmt.Sprintf("/posts/since/%v", time), "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil + } +} + +func (c *Client) GetPostsBefore(channelId string, postid string, offset int, limit int, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+fmt.Sprintf("/posts/%v/before/%v/%v", postid, offset, limit), "", etag); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil + } +} + +func (c *Client) GetPostsAfter(channelId string, postid string, offset int, limit int, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet(fmt.Sprintf(c.GetChannelRoute(channelId)+"/posts/%v/after/%v/%v", postid, offset, limit), "", etag); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil + } +} + +func (c *Client) GetPost(channelId string, postId string, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+fmt.Sprintf("/posts/%v/get", postId), "", etag); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil + } +} + +// GetPostById returns a post and any posts in the same thread by post id +func (c *Client) GetPostById(postId string, etag string) (*PostList, *ResponseMetadata) { + if r, err := c.DoApiGet(c.GetTeamRoute()+fmt.Sprintf("/posts/%v", postId), "", etag); err != nil { + return nil, &ResponseMetadata{StatusCode: r.StatusCode, Error: err} + } else { + defer closeBody(r) + return PostListFromJson(r.Body), + &ResponseMetadata{ 
+ StatusCode: r.StatusCode, + RequestId: r.Header.Get(HEADER_REQUEST_ID), + Etag: r.Header.Get(HEADER_ETAG_SERVER), + } + } +} + +// GetPermalink returns a post list, based on the provided channel and post ID. +func (c *Client) GetPermalink(channelId string, postId string, etag string) (*PostList, *ResponseMetadata) { + if r, err := c.DoApiGet(c.GetTeamRoute()+fmt.Sprintf("/pltmp/%v", postId), "", etag); err != nil { + return nil, &ResponseMetadata{StatusCode: r.StatusCode, Error: err} + } else { + defer closeBody(r) + return PostListFromJson(r.Body), + &ResponseMetadata{ + StatusCode: r.StatusCode, + RequestId: r.Header.Get(HEADER_REQUEST_ID), + Etag: r.Header.Get(HEADER_ETAG_SERVER), + } + } +} + +func (c *Client) DeletePost(channelId string, postId string) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetChannelRoute(channelId)+fmt.Sprintf("/posts/%v/delete", postId), ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) SearchPosts(terms string, isOrSearch bool) (*Result, *AppError) { + data := map[string]interface{}{} + data["terms"] = terms + data["is_or_search"] = isOrSearch + if r, err := c.DoApiPost(c.GetTeamRoute()+"/posts/search", StringInterfaceToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil + } +} + +// GetFlaggedPosts will return a post list of posts that have been flagged by the user. +// The page is set by the integer parameters offset and limit. +func (c *Client) GetFlaggedPosts(offset int, limit int) (*Result, *AppError) { + if r, err := c.DoApiGet(c.GetTeamRoute()+fmt.Sprintf("/posts/flagged/%v/%v", offset, limit), "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil + } +} + +func (c *Client) GetPinnedPosts(channelId string) (*Result, *AppError) { + if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/pinned", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil + } +} + +func (c *Client) UploadProfileFile(data []byte, contentType string) (*Result, *AppError) { + return c.uploadFile(c.ApiUrl+"/users/newimage", data, contentType) +} + +func (c *Client) UploadPostAttachment(data []byte, channelId string, filename string) (*FileUploadResponse, *AppError) { + c.clearExtraProperties() + + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + if part, err := writer.CreateFormFile("files", filename); err != nil { + return nil, NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.file.app_error", nil, err.Error(), 0) + } else if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil { + return nil, NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.file.app_error", nil, err.Error(), 0) + } + + if part, err := writer.CreateFormField("channel_id"); err != nil { + return nil, NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.channel_id.app_error", nil, err.Error(), 0) + } else if _, err = io.Copy(part, strings.NewReader(channelId)); err != nil { + return nil, NewAppError("UploadPostAttachment", 
"model.client.upload_post_attachment.channel_id.app_error", nil, err.Error(), 0) + } + + if err := writer.Close(); err != nil { + return nil, NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.writer.app_error", nil, err.Error(), 0) + } + + if result, err := c.uploadFile(c.ApiUrl+c.GetTeamRoute()+"/files/upload", body.Bytes(), writer.FormDataContentType()); err != nil { + return nil, err + } else { + return result.Data.(*FileUploadResponse), nil + } +} + +func (c *Client) uploadFile(url string, data []byte, contentType string) (*Result, *AppError) { + rq, _ := http.NewRequest("POST", url, bytes.NewReader(data)) + rq.Header.Set("Content-Type", contentType) + rq.Close = true + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, "BEARER "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil { + return nil, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0) + } else if rp.StatusCode >= 300 { + return nil, AppErrorFromJson(rp.Body) + } else { + defer closeBody(rp) + return &Result{rp.Header.Get(HEADER_REQUEST_ID), + rp.Header.Get(HEADER_ETAG_SERVER), FileUploadResponseFromJson(rp.Body)}, nil + } +} + +func (c *Client) GetFile(fileId string) (io.ReadCloser, *AppError) { + if r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/get", "", ""); err != nil { + return nil, err + } else { + c.fillInExtraProperties(r) + return r.Body, nil + } +} + +func (c *Client) GetFileThumbnail(fileId string) (io.ReadCloser, *AppError) { + if r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/get_thumbnail", "", ""); err != nil { + return nil, err + } else { + c.fillInExtraProperties(r) + return r.Body, nil + } +} + +func (c *Client) GetFilePreview(fileId string) (io.ReadCloser, *AppError) { + if r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/get_preview", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + c.fillInExtraProperties(r) + return r.Body, nil + } +} + +func (c *Client) GetFileInfo(fileId string) (*FileInfo, *AppError) { + if r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/get_info", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + c.fillInExtraProperties(r) + return FileInfoFromJson(r.Body), nil + } +} + +func (c *Client) GetPublicLink(fileId string) (string, *AppError) { + if r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/get_public_link", "", ""); err != nil { + return "", err + } else { + defer closeBody(r) + c.fillInExtraProperties(r) + return StringFromJson(r.Body), nil + } +} + +func (c *Client) UpdateUser(user *User) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/update", user.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateUserRoles(userId string, roles string) (*Result, *AppError) { + data := make(map[string]string) + data["new_roles"] = roles + + if r, err := c.DoApiPost(c.GetUserRequiredRoute(userId)+"/update_roles", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateTeamRoles(userId string, roles string) (*Result, *AppError) { + data := make(map[string]string) + data["new_roles"] = roles + data["user_id"] = userId + + if r, err := c.DoApiPost(c.GetTeamRoute()+"/update_member_roles", MapToJson(data)); err != nil { + return nil, err + } else { + 
defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) AttachDeviceId(deviceId string) (*Result, *AppError) { + data := make(map[string]string) + data["device_id"] = deviceId + if r, err := c.DoApiPost("/users/attach_device", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateActive(userId string, active bool) (*Result, *AppError) { + data := make(map[string]string) + data["user_id"] = userId + data["active"] = strconv.FormatBool(active) + if r, err := c.DoApiPost("/users/update_active", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateUserNotify(data map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/update_notify", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateUserPassword(userId, currentPassword, newPassword string) (*Result, *AppError) { + data := make(map[string]string) + data["current_password"] = currentPassword + data["new_password"] = newPassword + data["user_id"] = userId + + if r, err := c.DoApiPost("/users/newpassword", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) SendPasswordReset(email string) (*Result, *AppError) { + data := map[string]string{} + data["email"] = email + if r, err := c.DoApiPost("/users/send_password_reset", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) ResetPassword(code, newPassword string) (*Result, *AppError) { + data := map[string]string{} + data["code"] = code + data["new_password"] = newPassword + if r, err := c.DoApiPost("/users/reset_password", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) AdminResetPassword(userId, newPassword string) (*Result, *AppError) { + data := map[string]string{} + data["user_id"] = userId + data["new_password"] = newPassword + if r, err := c.DoApiPost("/admin/reset_password", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +// GetStatuses returns a map of string statuses using user id as the key +func (c *Client) GetStatuses() (*Result, *AppError) { + if r, err := c.DoApiGet("/users/status", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +// GetStatusesByIds returns a map of string statuses using user id as the key, +// based on the provided user ids +func (c 
*Client) GetStatusesByIds(userIds []string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/status/ids", ArrayToJson(userIds)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) GetMyTeam(etag string) (*Result, *AppError) { + if r, err := c.DoApiGet(c.GetTeamRoute()+"/me", "", etag); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), TeamFromJson(r.Body)}, nil + } +} + +// GetTeamMembers will return a page of team member objects as an array paged based on the +// team id, offset and limit provided. Must be authenticated. +func (c *Client) GetTeamMembers(teamId string, offset int, limit int) (*Result, *AppError) { + if r, err := c.DoApiGet(fmt.Sprintf("/teams/%v/members/%v/%v", teamId, offset, limit), "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), TeamMembersFromJson(r.Body)}, nil + } +} + +// GetMyTeamMembers will return an array with team member objects that the current user +// is a member of. Must be authenticated. +func (c *Client) GetMyTeamMembers() (*Result, *AppError) { + if r, err := c.DoApiGet("/teams/members", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), TeamMembersFromJson(r.Body)}, nil + } +} + +// GetMyTeamsUnread will return an array with TeamUnread objects that contain the amount of +// unread messages and mentions the current user has for the teams it belongs to. +// An optional team ID can be set to exclude that team from the results. Must be authenticated. +func (c *Client) GetMyTeamsUnread(teamId string) (*Result, *AppError) { + endpoint := "/teams/unread" + + if teamId != "" { + endpoint += fmt.Sprintf("?id=%s", url.QueryEscape(teamId)) + } + if r, err := c.DoApiGet(endpoint, "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), TeamsUnreadFromJson(r.Body)}, nil + } +} + +// GetTeamMember will return a team member object based on the team id and user id provided. +// Must be authenticated. +func (c *Client) GetTeamMember(teamId string, userId string) (*Result, *AppError) { + if r, err := c.DoApiGet(fmt.Sprintf("/teams/%v/members/%v", teamId, userId), "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), TeamMemberFromJson(r.Body)}, nil + } +} + +// GetTeamStats will return a team stats object containing the number of users on the team +// based on the team id provided. Must be authenticated. +func (c *Client) GetTeamStats(teamId string) (*Result, *AppError) { + if r, err := c.DoApiGet(fmt.Sprintf("/teams/%v/stats", teamId), "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), TeamStatsFromJson(r.Body)}, nil + } +} + +// GetTeamByName will return a team object based on the team name provided. Must be authenticated. 
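+//
+// Illustrative sketch only (assumes an authenticated *Client named client;
+// the team name is a placeholder):
+//
+//    if result, err := client.GetTeamByName("engineering"); err != nil {
+//        fmt.Println(err.Error())
+//    } else {
+//        team := result.Data.(*Team) // Data holds the decoded team
+//        fmt.Println(team.Id)
+//    }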
+func (c *Client) GetTeamByName(teamName string) (*Result, *AppError) { + if r, err := c.DoApiGet(fmt.Sprintf("/teams/name/%v", teamName), "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), TeamFromJson(r.Body)}, nil + } +} + +// GetTeamMembersByIds will return team member objects as an array based on the +// team id and a list of user ids provided. Must be authenticated. +func (c *Client) GetTeamMembersByIds(teamId string, userIds []string) (*Result, *AppError) { + if r, err := c.DoApiPost(fmt.Sprintf("/teams/%v/members/ids", teamId), ArrayToJson(userIds)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), TeamMembersFromJson(r.Body)}, nil + } +} + +// RegisterApp creates a new OAuth2 app to be used with the OAuth2 Provider. On success +// it returns the created app. Must be authenticated as a user. +func (c *Client) RegisterApp(app *OAuthApp) (*Result, *AppError) { + if r, err := c.DoApiPost("/oauth/register", app.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), OAuthAppFromJson(r.Body)}, nil + } +} + +// AllowOAuth allows a new session by an OAuth2 App. On success +// it returns the url to be redirected back to the app which initiated the oauth2 flow. +// Must be authenticated as a user. +func (c *Client) AllowOAuth(rspType, clientId, redirect, scope, state string) (*Result, *AppError) { + if r, err := c.DoApiGet("/oauth/allow?response_type="+rspType+"&client_id="+clientId+"&redirect_uri="+url.QueryEscape(redirect)+"&scope="+scope+"&state="+url.QueryEscape(state), "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +// GetOAuthAppsByUser returns the OAuth2 Apps registered by the user. On success +// it returns a list of OAuth2 Apps from the same user or all the registered apps if the user +// is a System Administrator. Must be authenticated as a user. +func (c *Client) GetOAuthAppsByUser() (*Result, *AppError) { + if r, err := c.DoApiGet("/oauth/list", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), OAuthAppListFromJson(r.Body)}, nil + } +} + +// GetOAuthAppInfo looks up an OAuth2 App using the client_id. On success +// it returns a Sanitized OAuth2 App. Must be authenticated as a user. +func (c *Client) GetOAuthAppInfo(clientId string) (*Result, *AppError) { + if r, err := c.DoApiGet("/oauth/app/"+clientId, "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), OAuthAppFromJson(r.Body)}, nil + } +} + +// DeleteOAuthApp deletes an OAuth2 app; the app must be deleted by the same user who created it or +// a System Administrator. On success returns Status OK. Must be authenticated as a user. 
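+//
+// Illustrative sketch only (assumes an authenticated *Client named client and
+// an app id, appId, previously returned by RegisterApp):
+//
+//    if _, err := client.DeleteOAuthApp(appId); err != nil {
+//        fmt.Println(err.Error())
+//    }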
+func (c *Client) DeleteOAuthApp(id string) (*Result, *AppError) { + data := make(map[string]string) + data["id"] = id + if r, err := c.DoApiPost("/oauth/delete", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +// GetOAuthAuthorizedApps returns the OAuth2 Apps authorized by the user. On success +// it returns a list of sanitized OAuth2 Authorized Apps by the user. +func (c *Client) GetOAuthAuthorizedApps() (*Result, *AppError) { + if r, err := c.DoApiGet("/oauth/authorized", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), OAuthAppListFromJson(r.Body)}, nil + } +} + +// OAuthDeauthorizeApp deauthorize a user an OAuth 2.0 app. On success +// it returns status OK or an AppError on fail. +func (c *Client) OAuthDeauthorizeApp(clientId string) *AppError { + if r, err := c.DoApiPost("/oauth/"+clientId+"/deauthorize", ""); err != nil { + return err + } else { + defer closeBody(r) + return nil + } +} + +// RegenerateOAuthAppSecret generates a new OAuth App Client Secret. On success +// it returns an OAuth2 App. Must be authenticated as a user and the same user who +// registered the app or a System Admin. +func (c *Client) RegenerateOAuthAppSecret(clientId string) (*Result, *AppError) { + if r, err := c.DoApiPost("/oauth/"+clientId+"/regen_secret", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), OAuthAppFromJson(r.Body)}, nil + } +} + +func (c *Client) GetAccessToken(data url.Values) (*Result, *AppError) { + if r, err := c.DoPost("/oauth/access_token", data.Encode(), "application/x-www-form-urlencoded"); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), AccessResponseFromJson(r.Body)}, nil + } +} + +func (c *Client) CreateIncomingWebhook(hook *IncomingWebhook) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetTeamRoute()+"/hooks/incoming/create", hook.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), IncomingWebhookFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateIncomingWebhook(hook *IncomingWebhook) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetTeamRoute()+"/hooks/incoming/update", hook.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), IncomingWebhookFromJson(r.Body)}, nil + } +} + +func (c *Client) PostToWebhook(id, payload string) (*Result, *AppError) { + if r, err := c.DoPost("/hooks/"+id, payload, "application/x-www-form-urlencoded"); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), nil}, nil + } +} + +func (c *Client) DeleteIncomingWebhook(id string) (*Result, *AppError) { + data := make(map[string]string) + data["id"] = id + if r, err := c.DoApiPost(c.GetTeamRoute()+"/hooks/incoming/delete", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func 
(c *Client) ListIncomingWebhooks() (*Result, *AppError) { + if r, err := c.DoApiGet(c.GetTeamRoute()+"/hooks/incoming/list", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), IncomingWebhookListFromJson(r.Body)}, nil + } +} + +func (c *Client) GetAllPreferences() (*Result, *AppError) { + if r, err := c.DoApiGet("/preferences/", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + preferences, _ := PreferencesFromJson(r.Body) + return &Result{r.Header.Get(HEADER_REQUEST_ID), r.Header.Get(HEADER_ETAG_SERVER), preferences}, nil + } +} + +func (c *Client) SetPreferences(preferences *Preferences) (*Result, *AppError) { + if r, err := c.DoApiPost("/preferences/save", preferences.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), preferences}, nil + } +} + +func (c *Client) GetPreference(category string, name string) (*Result, *AppError) { + if r, err := c.DoApiGet("/preferences/"+category+"/"+name, "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), r.Header.Get(HEADER_ETAG_SERVER), PreferenceFromJson(r.Body)}, nil + } +} + +func (c *Client) GetPreferenceCategory(category string) (*Result, *AppError) { + if r, err := c.DoApiGet("/preferences/"+category, "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + preferences, _ := PreferencesFromJson(r.Body) + return &Result{r.Header.Get(HEADER_REQUEST_ID), r.Header.Get(HEADER_ETAG_SERVER), preferences}, nil + } +} + +// DeletePreferences deletes a list of preferences owned by the current user. If successful, +// it will return status=ok. Otherwise, an error will be returned. 
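+//
+// Illustrative sketch only (assumes an authenticated *Client named client;
+// the preference category, name and user ids are placeholders):
+//
+//    prefs := Preferences{
+//        {UserId: userId, Category: "direct_channel_show", Name: otherUserId},
+//    }
+//    if ok, err := client.DeletePreferences(&prefs); err != nil {
+//        fmt.Println(err.Error())
+//    } else {
+//        fmt.Println(ok) // true when the server answered status=ok
+//    }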
+func (c *Client) DeletePreferences(preferences *Preferences) (bool, *AppError) { + if r, err := c.DoApiPost("/preferences/delete", preferences.ToJson()); err != nil { + return false, err + } else { + return c.CheckStatusOK(r), nil + } +} + +func (c *Client) CreateOutgoingWebhook(hook *OutgoingWebhook) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetTeamRoute()+"/hooks/outgoing/create", hook.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), OutgoingWebhookFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateOutgoingWebhook(hook *OutgoingWebhook) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetTeamRoute()+"/hooks/outgoing/update", hook.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), OutgoingWebhookFromJson(r.Body)}, nil + } +} + +func (c *Client) DeleteOutgoingWebhook(id string) (*Result, *AppError) { + data := make(map[string]string) + data["id"] = id + if r, err := c.DoApiPost(c.GetTeamRoute()+"/hooks/outgoing/delete", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) ListOutgoingWebhooks() (*Result, *AppError) { + if r, err := c.DoApiGet(c.GetTeamRoute()+"/hooks/outgoing/list", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), OutgoingWebhookListFromJson(r.Body)}, nil + } +} + +func (c *Client) RegenOutgoingWebhookToken(id string) (*Result, *AppError) { + data := make(map[string]string) + data["id"] = id + if r, err := c.DoApiPost(c.GetTeamRoute()+"/hooks/outgoing/regen_token", MapToJson(data)); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), OutgoingWebhookFromJson(r.Body)}, nil + } +} + +func (c *Client) MockSession(sessionToken string) { + c.AuthToken = sessionToken + c.AuthType = HEADER_BEARER +} + +func (c *Client) GetClientLicenceConfig(etag string) (*Result, *AppError) { + if r, err := c.DoApiGet("/license/client_config", "", etag); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) GetInitialLoad() (*Result, *AppError) { + if r, err := c.DoApiGet("/users/initial_load", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), InitialLoadFromJson(r.Body)}, nil + } +} + +// ListEmoji returns a list of all user-created emoji for the server. +func (c *Client) ListEmoji() ([]*Emoji, *AppError) { + if r, err := c.DoApiGet(c.GetEmojiRoute()+"/list", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + c.fillInExtraProperties(r) + return EmojiListFromJson(r.Body), nil + } +} + +// CreateEmoji will save an emoji to the server if the current user has permission +// to do so. If successful, the provided emoji will be returned with its Id field +// filled in. Otherwise, an error will be returned. 
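+//
+// Illustrative sketch only (assumes an authenticated *Client named client,
+// the current user's id in userId and the image bytes in imageBytes; the
+// emoji name and filename are placeholders):
+//
+//    emoji := &Emoji{CreatorId: userId, Name: "partyparrot"}
+//    if saved, err := client.CreateEmoji(emoji, imageBytes, "partyparrot.gif"); err != nil {
+//        fmt.Println(err.Error())
+//    } else {
+//        fmt.Println(saved.Id) // Id is filled in by the server
+//    }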
+func (c *Client) CreateEmoji(emoji *Emoji, image []byte, filename string) (*Emoji, *AppError) { + c.clearExtraProperties() + + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + if part, err := writer.CreateFormFile("image", filename); err != nil { + return nil, NewAppError("CreateEmoji", "model.client.create_emoji.image.app_error", nil, err.Error(), 0) + } else if _, err = io.Copy(part, bytes.NewBuffer(image)); err != nil { + return nil, NewAppError("CreateEmoji", "model.client.create_emoji.image.app_error", nil, err.Error(), 0) + } + + if err := writer.WriteField("emoji", emoji.ToJson()); err != nil { + return nil, NewAppError("CreateEmoji", "model.client.create_emoji.emoji.app_error", nil, err.Error(), 0) + } + + if err := writer.Close(); err != nil { + return nil, NewAppError("CreateEmoji", "model.client.create_emoji.writer.app_error", nil, err.Error(), 0) + } + + rq, _ := http.NewRequest("POST", c.ApiUrl+c.GetEmojiRoute()+"/create", body) + rq.Header.Set("Content-Type", writer.FormDataContentType()) + rq.Close = true + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, "BEARER "+c.AuthToken) + } + + if r, err := c.HttpClient.Do(rq); err != nil { + return nil, NewAppError("CreateEmoji", "model.client.connecting.app_error", nil, err.Error(), 0) + } else if r.StatusCode >= 300 { + return nil, AppErrorFromJson(r.Body) + } else { + defer closeBody(r) + c.fillInExtraProperties(r) + return EmojiFromJson(r.Body), nil + } +} + +// DeleteEmoji will delete an emoji from the server if the current user has permission +// to do so. If successful, it will return status=ok. Otherwise, an error will be returned. +func (c *Client) DeleteEmoji(id string) (bool, *AppError) { + data := map[string]string{"id": id} + + if r, err := c.DoApiPost(c.GetEmojiRoute()+"/delete", MapToJson(data)); err != nil { + return false, err + } else { + defer closeBody(r) + c.fillInExtraProperties(r) + return c.CheckStatusOK(r), nil + } +} + +// GetCustomEmojiImageUrl returns the API route that can be used to get the image used by +// the given emoji. +func (c *Client) GetCustomEmojiImageUrl(id string) string { + return c.GetEmojiRoute() + "/" + id +} + +// Uploads a x509 base64 Certificate or Private Key file to be used with SAML. +// data byte array is required and needs to be a Multi-Part with 'certificate' as the field name +// contentType is also required. Returns nil if succesful, otherwise returns an AppError +func (c *Client) UploadCertificateFile(data []byte, contentType string) *AppError { + url := c.ApiUrl + "/admin/add_certificate" + rq, _ := http.NewRequest("POST", url, bytes.NewReader(data)) + rq.Header.Set("Content-Type", contentType) + rq.Close = true + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, "BEARER "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil { + return NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0) + } else if rp.StatusCode >= 300 { + return AppErrorFromJson(rp.Body) + } else { + defer closeBody(rp) + c.fillInExtraProperties(rp) + return nil + } +} + +// Removes a x509 base64 Certificate or Private Key file used with SAML. +// filename is required. 
Returns nil if successful, otherwise returns an AppError +func (c *Client) RemoveCertificateFile(filename string) *AppError { + if r, err := c.DoApiPost("/admin/remove_certificate", MapToJson(map[string]string{"filename": filename})); err != nil { + return err + } else { + defer closeBody(r) + c.fillInExtraProperties(r) + return nil + } +} + +// Checks if the x509 base64 Certificates and Private Key files used with SAML exists on the file system. +// Returns a map[string]interface{} if successful, otherwise returns an AppError. Must be System Admin authenticated. +func (c *Client) SamlCertificateStatus(filename string) (map[string]interface{}, *AppError) { + if r, err := c.DoApiGet("/admin/remove_certificate", "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + c.fillInExtraProperties(r) + return StringInterfaceFromJson(r.Body), nil + } +} + +// GetWebrtcToken if Successful returns a map with a valid token, stun server and turn server with credentials to use with +// the Mattermost WebRTC service, otherwise returns an AppError. Must be authenticated user. +func (c *Client) GetWebrtcToken() (map[string]string, *AppError) { + if r, err := c.DoApiPost("/webrtc/token", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return MapFromJson(r.Body), nil + } +} + +// GetFileInfosForPost returns a list of FileInfo objects for a given post id, if successful. +// Otherwise, it returns an error. +func (c *Client) GetFileInfosForPost(channelId string, postId string, etag string) ([]*FileInfo, *AppError) { + c.clearExtraProperties() + + if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+fmt.Sprintf("/posts/%v/get_file_infos", postId), "", etag); err != nil { + return nil, err + } else { + defer closeBody(r) + c.fillInExtraProperties(r) + return FileInfosFromJson(r.Body), nil + } +} + +// Saves an emoji reaction for a post in the given channel. Returns the saved reaction if successful, otherwise returns an AppError. +func (c *Client) SaveReaction(channelId string, reaction *Reaction) (*Reaction, *AppError) { + if r, err := c.DoApiPost(c.GetChannelRoute(channelId)+fmt.Sprintf("/posts/%v/reactions/save", reaction.PostId), reaction.ToJson()); err != nil { + return nil, err + } else { + defer closeBody(r) + c.fillInExtraProperties(r) + return ReactionFromJson(r.Body), nil + } +} + +// Removes an emoji reaction for a post in the given channel. Returns nil if successful, otherwise returns an AppError. +func (c *Client) DeleteReaction(channelId string, reaction *Reaction) *AppError { + if r, err := c.DoApiPost(c.GetChannelRoute(channelId)+fmt.Sprintf("/posts/%v/reactions/delete", reaction.PostId), reaction.ToJson()); err != nil { + return err + } else { + defer closeBody(r) + c.fillInExtraProperties(r) + return nil + } +} + +// Lists all emoji reactions made for the given post in the given channel. Returns a list of Reactions if successful, otherwise returns an AppError. +func (c *Client) ListReactions(channelId string, postId string) ([]*Reaction, *AppError) { + if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+fmt.Sprintf("/posts/%v/reactions", postId), "", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + c.fillInExtraProperties(r) + return ReactionsFromJson(r.Body), nil + } +} + +// Updates the user's roles in the channel by replacing them with the roles provided. 
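+//
+// Illustrative sketch only (assumes an authenticated *Client named client
+// with permission to manage the channel, plus channelId and userId values;
+// the role string is a placeholder):
+//
+//    updated, metadata := client.UpdateChannelRoles(channelId, userId, "channel_user channel_admin")
+//    if metadata.Error != nil {
+//        fmt.Println(metadata.Error.Error())
+//    } else {
+//        fmt.Println(metadata.StatusCode, updated)
+//    }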
+func (c *Client) UpdateChannelRoles(channelId string, userId string, roles string) (map[string]string, *ResponseMetadata) { + data := make(map[string]string) + data["new_roles"] = roles + data["user_id"] = userId + + if r, err := c.DoApiPost(c.GetChannelRoute(channelId)+"/update_member_roles", MapToJson(data)); err != nil { + metadata := ResponseMetadata{Error: err} + if r != nil { + metadata.StatusCode = r.StatusCode + } + return nil, &metadata + } else { + defer closeBody(r) + return MapFromJson(r.Body), + &ResponseMetadata{ + StatusCode: r.StatusCode, + RequestId: r.Header.Get(HEADER_REQUEST_ID), + Etag: r.Header.Get(HEADER_ETAG_SERVER), + } + } +} + +func (c *Client) PinPost(channelId string, postId string) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetChannelRoute(channelId)+"/posts/"+postId+"/pin", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostFromJson(r.Body)}, nil + } +} + +func (c *Client) UnpinPost(channelId string, postId string) (*Result, *AppError) { + if r, err := c.DoApiPost(c.GetChannelRoute(channelId)+"/posts/"+postId+"/unpin", ""); err != nil { + return nil, err + } else { + defer closeBody(r) + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostFromJson(r.Body)}, nil + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/client4.go b/vendor/github.com/mattermost/mattermost-server/model/client4.go new file mode 100644 index 00000000..962b816b --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/client4.go @@ -0,0 +1,3299 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "strconv" + "strings" +) + +type Response struct { + StatusCode int + Error *AppError + RequestId string + Etag string + ServerVersion string + Header http.Header +} + +type Client4 struct { + Url string // The location of the server, for example "http://localhost:8065" + ApiUrl string // The api location of the server, for example "http://localhost:8065/api/v4" + HttpClient *http.Client // The http client + AuthToken string + AuthType string +} + +func NewAPIv4Client(url string) *Client4 { + return &Client4{url, url + API_URL_SUFFIX, &http.Client{}, "", ""} +} + +func BuildErrorResponse(r *http.Response, err *AppError) *Response { + var statusCode int + var header http.Header + if r != nil { + statusCode = r.StatusCode + header = r.Header + } else { + statusCode = 0 + header = make(http.Header) + } + + return &Response{ + StatusCode: statusCode, + Error: err, + Header: header, + } +} + +func BuildResponse(r *http.Response) *Response { + return &Response{ + StatusCode: r.StatusCode, + RequestId: r.Header.Get(HEADER_REQUEST_ID), + Etag: r.Header.Get(HEADER_ETAG_SERVER), + ServerVersion: r.Header.Get(HEADER_VERSION_ID), + Header: r.Header, + } +} + +func (c *Client4) SetOAuthToken(token string) { + c.AuthToken = token + c.AuthType = HEADER_TOKEN +} + +func (c *Client4) ClearOAuthToken() { + c.AuthToken = "" + c.AuthType = HEADER_BEARER +} + +func (c *Client4) GetUsersRoute() string { + return fmt.Sprintf("/users") +} + +func (c *Client4) GetUserRoute(userId string) string { + return fmt.Sprintf(c.GetUsersRoute()+"/%v", userId) +} + +func (c *Client4) GetUserAccessTokensRoute() string { + return fmt.Sprintf(c.GetUsersRoute() + "/tokens") 
+} + +func (c *Client4) GetUserAccessTokenRoute(tokenId string) string { + return fmt.Sprintf(c.GetUsersRoute()+"/tokens/%v", tokenId) +} + +func (c *Client4) GetUserByUsernameRoute(userName string) string { + return fmt.Sprintf(c.GetUsersRoute()+"/username/%v", userName) +} + +func (c *Client4) GetUserByEmailRoute(email string) string { + return fmt.Sprintf(c.GetUsersRoute()+"/email/%v", email) +} + +func (c *Client4) GetTeamsRoute() string { + return fmt.Sprintf("/teams") +} + +func (c *Client4) GetTeamRoute(teamId string) string { + return fmt.Sprintf(c.GetTeamsRoute()+"/%v", teamId) +} + +func (c *Client4) GetTeamAutoCompleteCommandsRoute(teamId string) string { + return fmt.Sprintf(c.GetTeamsRoute()+"/%v/commands/autocomplete", teamId) +} + +func (c *Client4) GetTeamByNameRoute(teamName string) string { + return fmt.Sprintf(c.GetTeamsRoute()+"/name/%v", teamName) +} + +func (c *Client4) GetTeamMemberRoute(teamId, userId string) string { + return fmt.Sprintf(c.GetTeamRoute(teamId)+"/members/%v", userId) +} + +func (c *Client4) GetTeamMembersRoute(teamId string) string { + return fmt.Sprintf(c.GetTeamRoute(teamId) + "/members") +} + +func (c *Client4) GetTeamStatsRoute(teamId string) string { + return fmt.Sprintf(c.GetTeamRoute(teamId) + "/stats") +} + +func (c *Client4) GetTeamImportRoute(teamId string) string { + return fmt.Sprintf(c.GetTeamRoute(teamId) + "/import") +} + +func (c *Client4) GetChannelsRoute() string { + return fmt.Sprintf("/channels") +} + +func (c *Client4) GetChannelsForTeamRoute(teamId string) string { + return fmt.Sprintf(c.GetTeamRoute(teamId) + "/channels") +} + +func (c *Client4) GetChannelRoute(channelId string) string { + return fmt.Sprintf(c.GetChannelsRoute()+"/%v", channelId) +} + +func (c *Client4) GetChannelByNameRoute(channelName, teamId string) string { + return fmt.Sprintf(c.GetTeamRoute(teamId)+"/channels/name/%v", channelName) +} + +func (c *Client4) GetChannelByNameForTeamNameRoute(channelName, teamName string) string { + return fmt.Sprintf(c.GetTeamByNameRoute(teamName)+"/channels/name/%v", channelName) +} + +func (c *Client4) GetChannelMembersRoute(channelId string) string { + return fmt.Sprintf(c.GetChannelRoute(channelId) + "/members") +} + +func (c *Client4) GetChannelMemberRoute(channelId, userId string) string { + return fmt.Sprintf(c.GetChannelMembersRoute(channelId)+"/%v", userId) +} + +func (c *Client4) GetPostsRoute() string { + return fmt.Sprintf("/posts") +} + +func (c *Client4) GetConfigRoute() string { + return fmt.Sprintf("/config") +} + +func (c *Client4) GetLicenseRoute() string { + return fmt.Sprintf("/license") +} + +func (c *Client4) GetPostRoute(postId string) string { + return fmt.Sprintf(c.GetPostsRoute()+"/%v", postId) +} + +func (c *Client4) GetFilesRoute() string { + return fmt.Sprintf("/files") +} + +func (c *Client4) GetFileRoute(fileId string) string { + return fmt.Sprintf(c.GetFilesRoute()+"/%v", fileId) +} + +func (c *Client4) GetPluginsRoute() string { + return fmt.Sprintf("/plugins") +} + +func (c *Client4) GetPluginRoute(pluginId string) string { + return fmt.Sprintf(c.GetPluginsRoute()+"/%v", pluginId) +} + +func (c *Client4) GetSystemRoute() string { + return fmt.Sprintf("/system") +} + +func (c *Client4) GetTestEmailRoute() string { + return fmt.Sprintf("/email/test") +} + +func (c *Client4) GetDatabaseRoute() string { + return fmt.Sprintf("/database") +} + +func (c *Client4) GetCacheRoute() string { + return fmt.Sprintf("/caches") +} + +func (c *Client4) GetClusterRoute() string { + return 
fmt.Sprintf("/cluster") +} + +func (c *Client4) GetIncomingWebhooksRoute() string { + return fmt.Sprintf("/hooks/incoming") +} + +func (c *Client4) GetIncomingWebhookRoute(hookID string) string { + return fmt.Sprintf(c.GetIncomingWebhooksRoute()+"/%v", hookID) +} + +func (c *Client4) GetComplianceReportsRoute() string { + return fmt.Sprintf("/compliance/reports") +} + +func (c *Client4) GetComplianceReportRoute(reportId string) string { + return fmt.Sprintf("/compliance/reports/%v", reportId) +} + +func (c *Client4) GetOutgoingWebhooksRoute() string { + return fmt.Sprintf("/hooks/outgoing") +} + +func (c *Client4) GetOutgoingWebhookRoute(hookID string) string { + return fmt.Sprintf(c.GetOutgoingWebhooksRoute()+"/%v", hookID) +} + +func (c *Client4) GetPreferencesRoute(userId string) string { + return fmt.Sprintf(c.GetUserRoute(userId) + "/preferences") +} + +func (c *Client4) GetUserStatusRoute(userId string) string { + return fmt.Sprintf(c.GetUserRoute(userId) + "/status") +} + +func (c *Client4) GetUserStatusesRoute() string { + return fmt.Sprintf(c.GetUsersRoute() + "/status") +} + +func (c *Client4) GetSamlRoute() string { + return fmt.Sprintf("/saml") +} + +func (c *Client4) GetLdapRoute() string { + return fmt.Sprintf("/ldap") +} + +func (c *Client4) GetBrandRoute() string { + return fmt.Sprintf("/brand") +} + +func (c *Client4) GetDataRetentionRoute() string { + return fmt.Sprintf("/data_retention") +} + +func (c *Client4) GetElasticsearchRoute() string { + return fmt.Sprintf("/elasticsearch") +} + +func (c *Client4) GetCommandsRoute() string { + return fmt.Sprintf("/commands") +} + +func (c *Client4) GetCommandRoute(commandId string) string { + return fmt.Sprintf(c.GetCommandsRoute()+"/%v", commandId) +} + +func (c *Client4) GetEmojisRoute() string { + return fmt.Sprintf("/emoji") +} + +func (c *Client4) GetEmojiRoute(emojiId string) string { + return fmt.Sprintf(c.GetEmojisRoute()+"/%v", emojiId) +} + +func (c *Client4) GetEmojiByNameRoute(name string) string { + return fmt.Sprintf(c.GetEmojisRoute()+"/name/%v", name) +} + +func (c *Client4) GetReactionsRoute() string { + return fmt.Sprintf("/reactions") +} + +func (c *Client4) GetOAuthAppsRoute() string { + return fmt.Sprintf("/oauth/apps") +} + +func (c *Client4) GetOAuthAppRoute(appId string) string { + return fmt.Sprintf("/oauth/apps/%v", appId) +} + +func (c *Client4) GetOpenGraphRoute() string { + return fmt.Sprintf("/opengraph") +} + +func (c *Client4) GetJobsRoute() string { + return fmt.Sprintf("/jobs") +} + +func (c *Client4) GetAnalyticsRoute() string { + return fmt.Sprintf("/analytics") +} + +func (c *Client4) DoApiGet(url string, etag string) (*http.Response, *AppError) { + return c.DoApiRequest(http.MethodGet, c.ApiUrl+url, "", etag) +} + +func (c *Client4) DoApiPost(url string, data string) (*http.Response, *AppError) { + return c.DoApiRequest(http.MethodPost, c.ApiUrl+url, data, "") +} + +func (c *Client4) DoApiPut(url string, data string) (*http.Response, *AppError) { + return c.DoApiRequest(http.MethodPut, c.ApiUrl+url, data, "") +} + +func (c *Client4) DoApiDelete(url string) (*http.Response, *AppError) { + return c.DoApiRequest(http.MethodDelete, c.ApiUrl+url, "", "") +} + +func (c *Client4) DoApiRequest(method, url, data, etag string) (*http.Response, *AppError) { + rq, _ := http.NewRequest(method, url, strings.NewReader(data)) + rq.Close = true + + if len(etag) > 0 { + rq.Header.Set(HEADER_ETAG_CLIENT, etag) + } + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) + } + + if 
rp, err := c.HttpClient.Do(rq); err != nil || rp == nil { + return nil, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0) + } else if rp.StatusCode == 304 { + return rp, nil + } else if rp.StatusCode >= 300 { + defer closeBody(rp) + return rp, AppErrorFromJson(rp.Body) + } else { + return rp, nil + } +} + +func (c *Client4) DoUploadFile(url string, data []byte, contentType string) (*FileUploadResponse, *Response) { + rq, _ := http.NewRequest("POST", c.ApiUrl+url, bytes.NewReader(data)) + rq.Header.Set("Content-Type", contentType) + rq.Close = true + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil || rp == nil { + return nil, BuildErrorResponse(rp, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0)) + } else { + defer closeBody(rp) + + if rp.StatusCode >= 300 { + return nil, BuildErrorResponse(rp, AppErrorFromJson(rp.Body)) + } else { + return FileUploadResponseFromJson(rp.Body), BuildResponse(rp) + } + } +} + +func (c *Client4) DoEmojiUploadFile(url string, data []byte, contentType string) (*Emoji, *Response) { + rq, _ := http.NewRequest("POST", c.ApiUrl+url, bytes.NewReader(data)) + rq.Header.Set("Content-Type", contentType) + rq.Close = true + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil || rp == nil { + return nil, BuildErrorResponse(rp, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0)) + } else { + defer closeBody(rp) + + if rp.StatusCode >= 300 { + return nil, BuildErrorResponse(rp, AppErrorFromJson(rp.Body)) + } else { + return EmojiFromJson(rp.Body), BuildResponse(rp) + } + } +} + +func (c *Client4) DoUploadImportTeam(url string, data []byte, contentType string) (map[string]string, *Response) { + rq, _ := http.NewRequest("POST", c.ApiUrl+url, bytes.NewReader(data)) + rq.Header.Set("Content-Type", contentType) + rq.Close = true + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil || rp == nil { + return nil, BuildErrorResponse(rp, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0)) + } else { + defer closeBody(rp) + + if rp.StatusCode >= 300 { + return nil, BuildErrorResponse(rp, AppErrorFromJson(rp.Body)) + } else { + return MapFromJson(rp.Body), BuildResponse(rp) + } + } +} + +// CheckStatusOK is a convenience function for checking the standard OK response +// from the web service. +func CheckStatusOK(r *http.Response) bool { + m := MapFromJson(r.Body) + defer closeBody(r) + + if m != nil && m[STATUS] == STATUS_OK { + return true + } + + return false +} + +// Authentication Section + +// LoginById authenticates a user by user id and password. +func (c *Client4) LoginById(id string, password string) (*User, *Response) { + m := make(map[string]string) + m["id"] = id + m["password"] = password + return c.login(m) +} + +// Login authenticates a user by login id, which can be username, email or some sort +// of SSO identifier based on server configuration, and a password. +func (c *Client4) Login(loginId string, password string) (*User, *Response) { + m := make(map[string]string) + m["login_id"] = loginId + m["password"] = password + return c.login(m) +} + +// LoginByLdap authenticates a user by LDAP id and password. 
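+//
+// Illustrative sketch only (the server URL and credentials are placeholders):
+//
+//    client := NewAPIv4Client("http://localhost:8065")
+//    if user, resp := client.LoginByLdap("ldap.account", "password"); resp.Error != nil {
+//        fmt.Println(resp.Error.Error())
+//    } else {
+//        fmt.Println(user.Id) // the session token is now stored on client
+//    }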
+func (c *Client4) LoginByLdap(loginId string, password string) (*User, *Response) { + m := make(map[string]string) + m["login_id"] = loginId + m["password"] = password + m["ldap_only"] = "true" + return c.login(m) +} + +// LoginWithDevice authenticates a user by login id (username, email or some sort +// of SSO identifier based on configuration), password and attaches a device id to +// the session. +func (c *Client4) LoginWithDevice(loginId string, password string, deviceId string) (*User, *Response) { + m := make(map[string]string) + m["login_id"] = loginId + m["password"] = password + m["device_id"] = deviceId + return c.login(m) +} + +func (c *Client4) login(m map[string]string) (*User, *Response) { + if r, err := c.DoApiPost("/users/login", MapToJson(m)); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + c.AuthToken = r.Header.Get(HEADER_TOKEN) + c.AuthType = HEADER_BEARER + defer closeBody(r) + return UserFromJson(r.Body), BuildResponse(r) + } +} + +// Logout terminates the current user's session. +func (c *Client4) Logout() (bool, *Response) { + if r, err := c.DoApiPost("/users/logout", ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + c.AuthToken = "" + c.AuthType = HEADER_BEARER + + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// SwitchAccountType changes a user's login type from one type to another. +func (c *Client4) SwitchAccountType(switchRequest *SwitchRequest) (string, *Response) { + if r, err := c.DoApiPost(c.GetUsersRoute()+"/login/switch", switchRequest.ToJson()); err != nil { + return "", BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return MapFromJson(r.Body)["follow_link"], BuildResponse(r) + } +} + +// User Section + +// CreateUser creates a user in the system based on the provided user struct. +func (c *Client4) CreateUser(user *User) (*User, *Response) { + if r, err := c.DoApiPost(c.GetUsersRoute(), user.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return UserFromJson(r.Body), BuildResponse(r) + } +} + +// CreateUserWithHash creates a user in the system based on the provided user struct and hash created. +func (c *Client4) CreateUserWithHash(user *User, hash, data string) (*User, *Response) { + var query string + if hash != "" && data != "" { + query = fmt.Sprintf("?d=%v&h=%v", url.QueryEscape(data), hash) + } else { + err := NewAppError("MissingHashOrData", "api.user.create_user.missing_hash_or_data.app_error", nil, "", http.StatusBadRequest) + return nil, &Response{StatusCode: err.StatusCode, Error: err} + } + if r, err := c.DoApiPost(c.GetUsersRoute()+query, user.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return UserFromJson(r.Body), BuildResponse(r) + } +} + +// CreateUserWithInviteId creates a user in the system based on the provided invited id. 
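Editor's usage sketch, not part of the vendored file: assuming this is the Mattermost model package's Client4, imported from github.com/mattermost/mattermost-server/model, and that NewAPIv4Client is its constructor, a login/logout round trip against the helpers above might look like the following. The server URL and credentials are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/mattermost/mattermost-server/model" // assumed import path for the vendored package
)

func main() {
	// NewAPIv4Client is assumed to return a *model.Client4 pointed at the server.
	client := model.NewAPIv4Client("https://chat.example.com")

	// Login stores the session token on the client (AuthToken/AuthType), so
	// subsequent calls are authenticated automatically.
	user, resp := client.Login("some.user@example.com", "example-password")
	if resp.Error != nil {
		log.Fatalf("login failed: %v", resp.Error)
	}
	fmt.Println("logged in as", user.Username)

	// Logout clears the stored token again.
	if ok, resp := client.Logout(); !ok {
		log.Printf("logout failed: %v", resp.Error)
	}
}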
+func (c *Client4) CreateUserWithInviteId(user *User, inviteId string) (*User, *Response) { + var query string + if inviteId != "" { + query = fmt.Sprintf("?iid=%v", url.QueryEscape(inviteId)) + } else { + err := NewAppError("MissingInviteId", "api.user.create_user.missing_invite_id.app_error", nil, "", http.StatusBadRequest) + return nil, &Response{StatusCode: err.StatusCode, Error: err} + } + if r, err := c.DoApiPost(c.GetUsersRoute()+query, user.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return UserFromJson(r.Body), BuildResponse(r) + } +} + +// GetMe returns the logged in user. +func (c *Client4) GetMe(etag string) (*User, *Response) { + if r, err := c.DoApiGet(c.GetUserRoute(ME), etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return UserFromJson(r.Body), BuildResponse(r) + } +} + +// GetUser returns a user based on the provided user id string. +func (c *Client4) GetUser(userId, etag string) (*User, *Response) { + if r, err := c.DoApiGet(c.GetUserRoute(userId), etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return UserFromJson(r.Body), BuildResponse(r) + } +} + +// GetUserByUsername returns a user based on the provided user name string. +func (c *Client4) GetUserByUsername(userName, etag string) (*User, *Response) { + if r, err := c.DoApiGet(c.GetUserByUsernameRoute(userName), etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return UserFromJson(r.Body), BuildResponse(r) + } +} + +// GetUserByEmail returns a user based on the provided user email string. +func (c *Client4) GetUserByEmail(email, etag string) (*User, *Response) { + if r, err := c.DoApiGet(c.GetUserByEmailRoute(email), etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return UserFromJson(r.Body), BuildResponse(r) + } +} + +// AutocompleteUsersInTeam returns the users on a team based on search term. +func (c *Client4) AutocompleteUsersInTeam(teamId string, username string, etag string) (*UserAutocomplete, *Response) { + query := fmt.Sprintf("?in_team=%v&name=%v", teamId, username) + if r, err := c.DoApiGet(c.GetUsersRoute()+"/autocomplete"+query, etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return UserAutocompleteFromJson(r.Body), BuildResponse(r) + } +} + +// AutocompleteUsersInChannel returns the users in a channel based on search term. +func (c *Client4) AutocompleteUsersInChannel(teamId string, channelId string, username string, etag string) (*UserAutocomplete, *Response) { + query := fmt.Sprintf("?in_team=%v&in_channel=%v&name=%v", teamId, channelId, username) + if r, err := c.DoApiGet(c.GetUsersRoute()+"/autocomplete"+query, etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return UserAutocompleteFromJson(r.Body), BuildResponse(r) + } +} + +// AutocompleteUsers returns the users in the system based on search term. +func (c *Client4) AutocompleteUsers(username string, etag string) (*UserAutocomplete, *Response) { + query := fmt.Sprintf("?name=%v", username) + if r, err := c.DoApiGet(c.GetUsersRoute()+"/autocomplete"+query, etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return UserAutocompleteFromJson(r.Body), BuildResponse(r) + } +} + +// GetProfileImage gets user's profile image. Must be logged in or be a system administrator. 
+func (c *Client4) GetProfileImage(userId, etag string) ([]byte, *Response) {
+	if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/image", etag); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+
+		if data, err := ioutil.ReadAll(r.Body); err != nil {
+			return nil, BuildErrorResponse(r, NewAppError("GetProfileImage", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode))
+		} else {
+			return data, BuildResponse(r)
+		}
+	}
+}
+
+// GetUsers returns a page of users on the system. Page counting starts at 0.
+func (c *Client4) GetUsers(page int, perPage int, etag string) ([]*User, *Response) {
+	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+	if r, err := c.DoApiGet(c.GetUsersRoute()+query, etag); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return UserListFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// GetUsersInTeam returns a page of users on a team. Page counting starts at 0.
+func (c *Client4) GetUsersInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response) {
+	query := fmt.Sprintf("?in_team=%v&page=%v&per_page=%v", teamId, page, perPage)
+	if r, err := c.DoApiGet(c.GetUsersRoute()+query, etag); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return UserListFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// GetNewUsersInTeam returns a page of users on a team, sorted by creation time. Page counting starts at 0.
+func (c *Client4) GetNewUsersInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response) {
+	query := fmt.Sprintf("?sort=create_at&in_team=%v&page=%v&per_page=%v", teamId, page, perPage)
+	if r, err := c.DoApiGet(c.GetUsersRoute()+query, etag); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return UserListFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// GetRecentlyActiveUsersInTeam returns a page of users on a team, sorted by last activity. Page counting starts at 0.
+func (c *Client4) GetRecentlyActiveUsersInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response) {
+	query := fmt.Sprintf("?sort=last_activity_at&in_team=%v&page=%v&per_page=%v", teamId, page, perPage)
+	if r, err := c.DoApiGet(c.GetUsersRoute()+query, etag); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return UserListFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// GetUsersNotInTeam returns a page of users who are not in a team. Page counting starts at 0.
+func (c *Client4) GetUsersNotInTeam(teamId string, page int, perPage int, etag string) ([]*User, *Response) {
+	query := fmt.Sprintf("?not_in_team=%v&page=%v&per_page=%v", teamId, page, perPage)
+	if r, err := c.DoApiGet(c.GetUsersRoute()+query, etag); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return UserListFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// GetUsersInChannel returns a page of users in a channel. Page counting starts at 0.
+func (c *Client4) GetUsersInChannel(channelId string, page int, perPage int, etag string) ([]*User, *Response) {
+	query := fmt.Sprintf("?in_channel=%v&page=%v&per_page=%v", channelId, page, perPage)
+	if r, err := c.DoApiGet(c.GetUsersRoute()+query, etag); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return UserListFromJson(r.Body), BuildResponse(r)
+	}
+}
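The list endpoints above are paged, with page counting starting at 0. A sketch of walking every user on a team via GetUsersInTeam, reusing the imports and client from the login sketch earlier; the stop condition assumes a short page marks the last page.

// fetchAllTeamUsers pages through GetUsersInTeam until a short page comes back.
func fetchAllTeamUsers(client *model.Client4, teamId string) ([]*model.User, error) {
	const perPage = 60
	var all []*model.User
	for page := 0; ; page++ {
		users, resp := client.GetUsersInTeam(teamId, page, perPage, "")
		if resp.Error != nil {
			return nil, fmt.Errorf("page %d failed: %v", page, resp.Error)
		}
		all = append(all, users...)
		if len(users) < perPage { // a short page means there is nothing left to fetch
			return all, nil
		}
	}
}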
+// GetUsersNotInChannel returns a page of users on a team who are not in a channel. Page counting starts at 0.
+func (c *Client4) GetUsersNotInChannel(teamId, channelId string, page int, perPage int, etag string) ([]*User, *Response) {
+	query := fmt.Sprintf("?in_team=%v&not_in_channel=%v&page=%v&per_page=%v", teamId, channelId, page, perPage)
+	if r, err := c.DoApiGet(c.GetUsersRoute()+query, etag); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return UserListFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// GetUsersWithoutTeam returns a page of users on the system that aren't on any teams. Page counting starts at 0.
+func (c *Client4) GetUsersWithoutTeam(page int, perPage int, etag string) ([]*User, *Response) {
+	query := fmt.Sprintf("?without_team=1&page=%v&per_page=%v", page, perPage)
+	if r, err := c.DoApiGet(c.GetUsersRoute()+query, etag); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return UserListFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// GetUsersByIds returns a list of users based on the provided user ids.
+func (c *Client4) GetUsersByIds(userIds []string) ([]*User, *Response) {
+	if r, err := c.DoApiPost(c.GetUsersRoute()+"/ids", ArrayToJson(userIds)); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return UserListFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// GetUsersByUsernames returns a list of users based on the provided usernames.
+func (c *Client4) GetUsersByUsernames(usernames []string) ([]*User, *Response) {
+	if r, err := c.DoApiPost(c.GetUsersRoute()+"/usernames", ArrayToJson(usernames)); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return UserListFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// SearchUsers returns a list of users based on some search criteria.
+func (c *Client4) SearchUsers(search *UserSearch) ([]*User, *Response) {
+	if r, err := c.DoApiPost(c.GetUsersRoute()+"/search", search.ToJson()); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return UserListFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// UpdateUser updates a user in the system based on the provided user struct.
+func (c *Client4) UpdateUser(user *User) (*User, *Response) {
+	if r, err := c.DoApiPut(c.GetUserRoute(user.Id), user.ToJson()); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return UserFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// PatchUser partially updates a user in the system. Any missing fields are not updated.
+func (c *Client4) PatchUser(userId string, patch *UserPatch) (*User, *Response) {
+	if r, err := c.DoApiPut(c.GetUserRoute(userId)+"/patch", patch.ToJson()); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return UserFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// UpdateUserAuth updates a user's AuthData (authData, authService and password) in the system.
+func (c *Client4) UpdateUserAuth(userId string, userAuth *UserAuth) (*UserAuth, *Response) {
+	if r, err := c.DoApiPut(c.GetUserRoute(userId)+"/auth", userAuth.ToJson()); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return UserAuthFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// UpdateUserMfa activates multi-factor authentication for a user if activate
+// is true and a valid code is provided. If activate is false, then code is not
+// required and multi-factor authentication is disabled for the user.
+func (c *Client4) UpdateUserMfa(userId, code string, activate bool) (bool, *Response) { + requestBody := make(map[string]interface{}) + requestBody["activate"] = activate + requestBody["code"] = code + + if r, err := c.DoApiPut(c.GetUserRoute(userId)+"/mfa", StringInterfaceToJson(requestBody)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// CheckUserMfa checks whether a user has MFA active on their account or not based on the +// provided login id. +func (c *Client4) CheckUserMfa(loginId string) (bool, *Response) { + requestBody := make(map[string]interface{}) + requestBody["login_id"] = loginId + + if r, err := c.DoApiPost(c.GetUsersRoute()+"/mfa", StringInterfaceToJson(requestBody)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + data := StringInterfaceFromJson(r.Body) + if mfaRequired, ok := data["mfa_required"].(bool); !ok { + return false, BuildResponse(r) + } else { + return mfaRequired, BuildResponse(r) + } + } +} + +// GenerateMfaSecret will generate a new MFA secret for a user and return it as a string and +// as a base64 encoded image QR code. +func (c *Client4) GenerateMfaSecret(userId string) (*MfaSecret, *Response) { + if r, err := c.DoApiPost(c.GetUserRoute(userId)+"/mfa/generate", ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return MfaSecretFromJson(r.Body), BuildResponse(r) + } +} + +// UpdateUserPassword updates a user's password. Must be logged in as the user or be a system administrator. +func (c *Client4) UpdateUserPassword(userId, currentPassword, newPassword string) (bool, *Response) { + requestBody := map[string]string{"current_password": currentPassword, "new_password": newPassword} + if r, err := c.DoApiPut(c.GetUserRoute(userId)+"/password", MapToJson(requestBody)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// UpdateUserRoles updates a user's roles in the system. A user can have "system_user" and "system_admin" roles. +func (c *Client4) UpdateUserRoles(userId, roles string) (bool, *Response) { + requestBody := map[string]string{"roles": roles} + if r, err := c.DoApiPut(c.GetUserRoute(userId)+"/roles", MapToJson(requestBody)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// UpdateUserActive updates status of a user whether active or not. +func (c *Client4) UpdateUserActive(userId string, active bool) (bool, *Response) { + requestBody := make(map[string]interface{}) + requestBody["active"] = active + + if r, err := c.DoApiPut(c.GetUserRoute(userId)+"/active", StringInterfaceToJson(requestBody)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// DeleteUser deactivates a user in the system based on the provided user id string. +func (c *Client4) DeleteUser(userId string) (bool, *Response) { + if r, err := c.DoApiDelete(c.GetUserRoute(userId)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// SendPasswordResetEmail will send a link for password resetting to a user with the +// provided email. 
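A sketch of changing a user's password with UpdateUserPassword above; like the other boolean-returning helpers, success comes back as the bool and failure details live on the *Response. Client setup is as in the earlier login sketch.

func changePassword(client *model.Client4, userId, currentPassword, newPassword string) error {
	if ok, resp := client.UpdateUserPassword(userId, currentPassword, newPassword); !ok {
		// resp.Error carries the *AppError explaining the rejection
		// (wrong current password, policy violation, and so on).
		return fmt.Errorf("password update failed: %v", resp.Error)
	}
	return nil
}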
+func (c *Client4) SendPasswordResetEmail(email string) (bool, *Response) { + requestBody := map[string]string{"email": email} + if r, err := c.DoApiPost(c.GetUsersRoute()+"/password/reset/send", MapToJson(requestBody)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// ResetPassword uses a recovery code to update reset a user's password. +func (c *Client4) ResetPassword(token, newPassword string) (bool, *Response) { + requestBody := map[string]string{"token": token, "new_password": newPassword} + if r, err := c.DoApiPost(c.GetUsersRoute()+"/password/reset", MapToJson(requestBody)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// GetSessions returns a list of sessions based on the provided user id string. +func (c *Client4) GetSessions(userId, etag string) ([]*Session, *Response) { + if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/sessions", etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return SessionsFromJson(r.Body), BuildResponse(r) + } +} + +// RevokeSession revokes a user session based on the provided user id and session id strings. +func (c *Client4) RevokeSession(userId, sessionId string) (bool, *Response) { + requestBody := map[string]string{"session_id": sessionId} + if r, err := c.DoApiPost(c.GetUserRoute(userId)+"/sessions/revoke", MapToJson(requestBody)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// RevokeAllSessions revokes all sessions for the provided user id string. +func (c *Client4) RevokeAllSessions(userId string) (bool, *Response) { + if r, err := c.DoApiPost(c.GetUserRoute(userId)+"/sessions/revoke/all", ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// AttachDeviceId attaches a mobile device ID to the current session. +func (c *Client4) AttachDeviceId(deviceId string) (bool, *Response) { + requestBody := map[string]string{"device_id": deviceId} + if r, err := c.DoApiPut(c.GetUsersRoute()+"/sessions/device", MapToJson(requestBody)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// GetTeamsUnreadForUser will return an array with TeamUnread objects that contain the amount +// of unread messages and mentions the current user has for the teams it belongs to. +// An optional team ID can be set to exclude that team from the results. Must be authenticated. +func (c *Client4) GetTeamsUnreadForUser(userId, teamIdToExclude string) ([]*TeamUnread, *Response) { + optional := "" + if teamIdToExclude != "" { + optional += fmt.Sprintf("?exclude_team=%s", url.QueryEscape(teamIdToExclude)) + } + + if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/teams/unread"+optional, ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return TeamsUnreadFromJson(r.Body), BuildResponse(r) + } +} + +// GetUserAudits returns a list of audit based on the provided user id string. 
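A sketch of listing a user's sessions with GetSessions above and revoking all but the current one with RevokeSession; the Session Id field name is an assumption about the surrounding model package. When the current session does not need to survive, RevokeAllSessions above is the simpler choice.

func revokeOtherSessions(client *model.Client4, userId, currentSessionId string) error {
	sessions, resp := client.GetSessions(userId, "")
	if resp.Error != nil {
		return fmt.Errorf("listing sessions failed: %v", resp.Error)
	}
	for _, session := range sessions {
		if session.Id == currentSessionId {
			continue // keep the session this client is using
		}
		if ok, resp := client.RevokeSession(userId, session.Id); !ok {
			return fmt.Errorf("revoking session %s failed: %v", session.Id, resp.Error)
		}
	}
	return nil
}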
+func (c *Client4) GetUserAudits(userId string, page int, perPage int, etag string) (Audits, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/audits"+query, etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return AuditsFromJson(r.Body), BuildResponse(r) + } +} + +// VerifyUserEmail will verify a user's email using the supplied token. +func (c *Client4) VerifyUserEmail(token string) (bool, *Response) { + requestBody := map[string]string{"token": token} + if r, err := c.DoApiPost(c.GetUsersRoute()+"/email/verify", MapToJson(requestBody)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// SendVerificationEmail will send an email to the user with the provided email address, if +// that user exists. The email will contain a link that can be used to verify the user's +// email address. +func (c *Client4) SendVerificationEmail(email string) (bool, *Response) { + requestBody := map[string]string{"email": email} + if r, err := c.DoApiPost(c.GetUsersRoute()+"/email/verify/send", MapToJson(requestBody)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// SetProfileImage sets profile image of the user +func (c *Client4) SetProfileImage(userId string, data []byte) (bool, *Response) { + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + if part, err := writer.CreateFormFile("image", "profile.png"); err != nil { + return false, &Response{Error: NewAppError("SetProfileImage", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)} + } else if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil { + return false, &Response{Error: NewAppError("SetProfileImage", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + if err := writer.Close(); err != nil { + return false, &Response{Error: NewAppError("SetProfileImage", "model.client.set_profile_user.writer.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + rq, _ := http.NewRequest("POST", c.ApiUrl+c.GetUserRoute(userId)+"/image", bytes.NewReader(body.Bytes())) + rq.Header.Set("Content-Type", writer.FormDataContentType()) + rq.Close = true + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil || rp == nil { + // set to http.StatusForbidden(403) + return false, &Response{StatusCode: http.StatusForbidden, Error: NewAppError(c.GetUserRoute(userId)+"/image", "model.client.connecting.app_error", nil, err.Error(), 403)} + } else { + defer closeBody(rp) + + if rp.StatusCode >= 300 { + return false, BuildErrorResponse(rp, AppErrorFromJson(rp.Body)) + } else { + return CheckStatusOK(rp), BuildResponse(rp) + } + } +} + +// CreateUserAccessToken will generate a user access token that can be used in place +// of a session token to access the REST API. Must have the 'create_user_access_token' +// permission and if generating for another user, must have the 'edit_other_users' +// permission. A non-blank description is required. 
+func (c *Client4) CreateUserAccessToken(userId, description string) (*UserAccessToken, *Response) { + requestBody := map[string]string{"description": description} + if r, err := c.DoApiPost(c.GetUserRoute(userId)+"/tokens", MapToJson(requestBody)); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return UserAccessTokenFromJson(r.Body), BuildResponse(r) + } +} + +// GetUserAccessTokens will get a page of access tokens' id, description, is_active +// and the user_id in the system. The actual token will not be returned. Must have +// the 'manage_system' permission. +func (c *Client4) GetUserAccessTokens(page int, perPage int) ([]*UserAccessToken, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + if r, err := c.DoApiGet(c.GetUserAccessTokensRoute()+query, ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return UserAccessTokenListFromJson(r.Body), BuildResponse(r) + } +} + +// GetUserAccessToken will get a user access tokens' id, description, is_active +// and the user_id of the user it is for. The actual token will not be returned. +// Must have the 'read_user_access_token' permission and if getting for another +// user, must have the 'edit_other_users' permission. +func (c *Client4) GetUserAccessToken(tokenId string) (*UserAccessToken, *Response) { + if r, err := c.DoApiGet(c.GetUserAccessTokenRoute(tokenId), ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return UserAccessTokenFromJson(r.Body), BuildResponse(r) + } +} + +// GetUserAccessTokensForUser will get a paged list of user access tokens showing id, +// description and user_id for each. The actual tokens will not be returned. Must have +// the 'read_user_access_token' permission and if getting for another user, must have the +// 'edit_other_users' permission. +func (c *Client4) GetUserAccessTokensForUser(userId string, page, perPage int) ([]*UserAccessToken, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/tokens"+query, ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return UserAccessTokenListFromJson(r.Body), BuildResponse(r) + } +} + +// RevokeUserAccessToken will revoke a user access token by id. Must have the +// 'revoke_user_access_token' permission and if revoking for another user, must have the +// 'edit_other_users' permission. +func (c *Client4) RevokeUserAccessToken(tokenId string) (bool, *Response) { + requestBody := map[string]string{"token_id": tokenId} + if r, err := c.DoApiPost(c.GetUsersRoute()+"/tokens/revoke", MapToJson(requestBody)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// SearchUserAccessTokens returns user access tokens matching the provided search term. +func (c *Client4) SearchUserAccessTokens(search *UserAccessTokenSearch) ([]*UserAccessToken, *Response) { + if r, err := c.DoApiPost(c.GetUsersRoute()+"/tokens/search", search.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return UserAccessTokenListFromJson(r.Body), BuildResponse(r) + } +} + +// DisableUserAccessToken will disable a user access token by id. Must have the +// 'revoke_user_access_token' permission and if disabling for another user, must have the +// 'edit_other_users' permission. 
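A sketch of creating a personal access token with CreateUserAccessToken above and then authenticating with it instead of the login session by setting the client's exported AuthToken/AuthType fields; the Token field on UserAccessToken is an assumption, while HEADER_BEARER is the same auth type the login helper sets.

func switchToAccessToken(client *model.Client4, userId, description string) error {
	token, resp := client.CreateUserAccessToken(userId, description)
	if resp.Error != nil {
		return fmt.Errorf("creating access token failed: %v", resp.Error)
	}

	// Reuse the client, but send the personal access token on future requests
	// instead of the session token obtained at login.
	client.AuthType = model.HEADER_BEARER
	client.AuthToken = token.Token // Token is assumed to hold the secret value

	if _, resp := client.GetMe(""); resp.Error != nil {
		return fmt.Errorf("token authentication failed: %v", resp.Error)
	}
	return nil
}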
+func (c *Client4) DisableUserAccessToken(tokenId string) (bool, *Response) { + requestBody := map[string]string{"token_id": tokenId} + if r, err := c.DoApiPost(c.GetUsersRoute()+"/tokens/disable", MapToJson(requestBody)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// EnableUserAccessToken will enable a user access token by id. Must have the +// 'create_user_access_token' permission and if enabling for another user, must have the +// 'edit_other_users' permission. +func (c *Client4) EnableUserAccessToken(tokenId string) (bool, *Response) { + requestBody := map[string]string{"token_id": tokenId} + if r, err := c.DoApiPost(c.GetUsersRoute()+"/tokens/enable", MapToJson(requestBody)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// Team Section + +// CreateTeam creates a team in the system based on the provided team struct. +func (c *Client4) CreateTeam(team *Team) (*Team, *Response) { + if r, err := c.DoApiPost(c.GetTeamsRoute(), team.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return TeamFromJson(r.Body), BuildResponse(r) + } +} + +// GetTeam returns a team based on the provided team id string. +func (c *Client4) GetTeam(teamId, etag string) (*Team, *Response) { + if r, err := c.DoApiGet(c.GetTeamRoute(teamId), etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return TeamFromJson(r.Body), BuildResponse(r) + } +} + +// GetAllTeams returns all teams based on permissions. +func (c *Client4) GetAllTeams(etag string, page int, perPage int) ([]*Team, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + if r, err := c.DoApiGet(c.GetTeamsRoute()+query, etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return TeamListFromJson(r.Body), BuildResponse(r) + } +} + +// GetTeamByName returns a team based on the provided team name string. +func (c *Client4) GetTeamByName(name, etag string) (*Team, *Response) { + if r, err := c.DoApiGet(c.GetTeamByNameRoute(name), etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return TeamFromJson(r.Body), BuildResponse(r) + } +} + +// SearchTeams returns teams matching the provided search term. +func (c *Client4) SearchTeams(search *TeamSearch) ([]*Team, *Response) { + if r, err := c.DoApiPost(c.GetTeamsRoute()+"/search", search.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return TeamListFromJson(r.Body), BuildResponse(r) + } +} + +// TeamExists returns true or false if the team exist or not. +func (c *Client4) TeamExists(name, etag string) (bool, *Response) { + if r, err := c.DoApiGet(c.GetTeamByNameRoute(name)+"/exists", etag); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return MapBoolFromJson(r.Body)["exists"], BuildResponse(r) + } +} + +// GetTeamsForUser returns a list of teams a user is on. Must be logged in as the user +// or be a system administrator. 
+func (c *Client4) GetTeamsForUser(userId, etag string) ([]*Team, *Response) { + if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/teams", etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return TeamListFromJson(r.Body), BuildResponse(r) + } +} + +// GetTeamMember returns a team member based on the provided team and user id strings. +func (c *Client4) GetTeamMember(teamId, userId, etag string) (*TeamMember, *Response) { + if r, err := c.DoApiGet(c.GetTeamMemberRoute(teamId, userId), etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return TeamMemberFromJson(r.Body), BuildResponse(r) + } +} + +// UpdateTeamMemberRoles will update the roles on a team for a user. +func (c *Client4) UpdateTeamMemberRoles(teamId, userId, newRoles string) (bool, *Response) { + requestBody := map[string]string{"roles": newRoles} + if r, err := c.DoApiPut(c.GetTeamMemberRoute(teamId, userId)+"/roles", MapToJson(requestBody)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// UpdateTeam will update a team. +func (c *Client4) UpdateTeam(team *Team) (*Team, *Response) { + if r, err := c.DoApiPut(c.GetTeamRoute(team.Id), team.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return TeamFromJson(r.Body), BuildResponse(r) + } +} + +// PatchTeam partially updates a team. Any missing fields are not updated. +func (c *Client4) PatchTeam(teamId string, patch *TeamPatch) (*Team, *Response) { + if r, err := c.DoApiPut(c.GetTeamRoute(teamId)+"/patch", patch.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return TeamFromJson(r.Body), BuildResponse(r) + } +} + +// SoftDeleteTeam deletes the team softly (archive only, not permanent delete). +func (c *Client4) SoftDeleteTeam(teamId string) (bool, *Response) { + if r, err := c.DoApiDelete(c.GetTeamRoute(teamId)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// PermanentDeleteTeam deletes the team, should only be used when needed for +// compliance and the like +func (c *Client4) PermanentDeleteTeam(teamId string) (bool, *Response) { + if r, err := c.DoApiDelete(c.GetTeamRoute(teamId) + "?permanent=true"); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// GetTeamMembers returns team members based on the provided team id string. +func (c *Client4) GetTeamMembers(teamId string, page int, perPage int, etag string) ([]*TeamMember, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + if r, err := c.DoApiGet(c.GetTeamMembersRoute(teamId)+query, etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return TeamMembersFromJson(r.Body), BuildResponse(r) + } +} + +// GetTeamMembersForUser returns the team members for a user. +func (c *Client4) GetTeamMembersForUser(userId string, etag string) ([]*TeamMember, *Response) { + if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/teams/members", etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return TeamMembersFromJson(r.Body), BuildResponse(r) + } +} + +// GetTeamMembersByIds will return an array of team members based on the +// team id and a list of user ids provided. 
Must be authenticated. +func (c *Client4) GetTeamMembersByIds(teamId string, userIds []string) ([]*TeamMember, *Response) { + if r, err := c.DoApiPost(fmt.Sprintf("/teams/%v/members/ids", teamId), ArrayToJson(userIds)); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return TeamMembersFromJson(r.Body), BuildResponse(r) + } +} + +// AddTeamMember adds user to a team and return a team member. +func (c *Client4) AddTeamMember(teamId, userId string) (*TeamMember, *Response) { + member := &TeamMember{TeamId: teamId, UserId: userId} + + if r, err := c.DoApiPost(c.GetTeamMembersRoute(teamId), member.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return TeamMemberFromJson(r.Body), BuildResponse(r) + } +} + +// AddTeamMemberFromInvite adds a user to a team and return a team member using an invite id +// or an invite hash/data pair. +func (c *Client4) AddTeamMemberFromInvite(hash, dataToHash, inviteId string) (*TeamMember, *Response) { + var query string + + if inviteId != "" { + query += fmt.Sprintf("?invite_id=%v", inviteId) + } + + if hash != "" && dataToHash != "" { + query += fmt.Sprintf("?hash=%v&data=%v", hash, dataToHash) + } + + if r, err := c.DoApiPost(c.GetTeamsRoute()+"/members/invite"+query, ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return TeamMemberFromJson(r.Body), BuildResponse(r) + } +} + +// AddTeamMembers adds a number of users to a team and returns the team members. +func (c *Client4) AddTeamMembers(teamId string, userIds []string) ([]*TeamMember, *Response) { + var members []*TeamMember + for _, userId := range userIds { + member := &TeamMember{TeamId: teamId, UserId: userId} + members = append(members, member) + } + + if r, err := c.DoApiPost(c.GetTeamMembersRoute(teamId)+"/batch", TeamMembersToJson(members)); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return TeamMembersFromJson(r.Body), BuildResponse(r) + } +} + +// RemoveTeamMember will remove a user from a team. +func (c *Client4) RemoveTeamMember(teamId, userId string) (bool, *Response) { + if r, err := c.DoApiDelete(c.GetTeamMemberRoute(teamId, userId)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// GetTeamStats returns a team stats based on the team id string. +// Must be authenticated. +func (c *Client4) GetTeamStats(teamId, etag string) (*TeamStats, *Response) { + if r, err := c.DoApiGet(c.GetTeamStatsRoute(teamId), etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return TeamStatsFromJson(r.Body), BuildResponse(r) + } +} + +// GetTeamUnread will return a TeamUnread object that contains the amount of +// unread messages and mentions the user has for the specified team. +// Must be authenticated. +func (c *Client4) GetTeamUnread(teamId, userId string) (*TeamUnread, *Response) { + if r, err := c.DoApiGet(c.GetUserRoute(userId)+c.GetTeamRoute(teamId)+"/unread", ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return TeamUnreadFromJson(r.Body), BuildResponse(r) + } +} + +// ImportTeam will import an exported team from other app into a existing team. 
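A sketch of creating a team and adding a user to it with CreateTeam and AddTeamMember above; the Team field names and the TEAM_OPEN constant are assumptions about the surrounding model package.

func createTeamWithMember(client *model.Client4, userId string) (*model.Team, error) {
	team, resp := client.CreateTeam(&model.Team{
		Name:        "announcements", // URL-safe name (assumed field)
		DisplayName: "Announcements", // assumed field
		Type:        model.TEAM_OPEN, // open-team constant (assumed)
	})
	if resp.Error != nil {
		return nil, fmt.Errorf("creating team failed: %v", resp.Error)
	}

	if _, resp := client.AddTeamMember(team.Id, userId); resp.Error != nil {
		return nil, fmt.Errorf("adding team member failed: %v", resp.Error)
	}
	return team, nil
}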
+func (c *Client4) ImportTeam(data []byte, filesize int, importFrom, filename, teamId string) (map[string]string, *Response) { + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + if part, err := writer.CreateFormFile("file", filename); err != nil { + return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.file.app_error", nil, err.Error(), http.StatusBadRequest)} + } else if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil { + return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.file.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + if part, err := writer.CreateFormField("filesize"); err != nil { + return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.file_size.app_error", nil, err.Error(), http.StatusBadRequest)} + } else if _, err = io.Copy(part, strings.NewReader(strconv.Itoa(filesize))); err != nil { + return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.file_size.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + if part, err := writer.CreateFormField("importFrom"); err != nil { + return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.import_from.app_error", nil, err.Error(), http.StatusBadRequest)} + } else if _, err = io.Copy(part, strings.NewReader(importFrom)); err != nil { + return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.import_from.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + if err := writer.Close(); err != nil { + return nil, &Response{Error: NewAppError("UploadImportTeam", "model.client.upload_post_attachment.writer.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + return c.DoUploadImportTeam(c.GetTeamImportRoute(teamId), body.Bytes(), writer.FormDataContentType()) +} + +// InviteUsersToTeam invite users by email to the team. +func (c *Client4) InviteUsersToTeam(teamId string, userEmails []string) (bool, *Response) { + if r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/invite/email", ArrayToJson(userEmails)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// GetTeamInviteInfo returns a team object from an invite id containing sanitized information. +func (c *Client4) GetTeamInviteInfo(inviteId string) (*Team, *Response) { + if r, err := c.DoApiGet(c.GetTeamsRoute()+"/invite/"+inviteId, ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return TeamFromJson(r.Body), BuildResponse(r) + } +} + +// Channel Section + +// CreateChannel creates a channel based on the provided channel struct. +func (c *Client4) CreateChannel(channel *Channel) (*Channel, *Response) { + if r, err := c.DoApiPost(c.GetChannelsRoute(), channel.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelFromJson(r.Body), BuildResponse(r) + } +} + +// UpdateChannel update a channel based on the provided channel struct. +func (c *Client4) UpdateChannel(channel *Channel) (*Channel, *Response) { + if r, err := c.DoApiPut(c.GetChannelRoute(channel.Id), channel.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelFromJson(r.Body), BuildResponse(r) + } +} + +// PatchChannel partially updates a channel. 
Any missing fields are not updated. +func (c *Client4) PatchChannel(channelId string, patch *ChannelPatch) (*Channel, *Response) { + if r, err := c.DoApiPut(c.GetChannelRoute(channelId)+"/patch", patch.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelFromJson(r.Body), BuildResponse(r) + } +} + +// RestoreChannel restores a previously deleted channel. Any missing fields are not updated. +func (c *Client4) RestoreChannel(channelId string) (*Channel, *Response) { + if r, err := c.DoApiPost(c.GetChannelRoute(channelId)+"/restore", ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelFromJson(r.Body), BuildResponse(r) + } +} + +// CreateDirectChannel creates a direct message channel based on the two user +// ids provided. +func (c *Client4) CreateDirectChannel(userId1, userId2 string) (*Channel, *Response) { + requestBody := []string{userId1, userId2} + if r, err := c.DoApiPost(c.GetChannelsRoute()+"/direct", ArrayToJson(requestBody)); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelFromJson(r.Body), BuildResponse(r) + } +} + +// CreateGroupChannel creates a group message channel based on userIds provided +func (c *Client4) CreateGroupChannel(userIds []string) (*Channel, *Response) { + if r, err := c.DoApiPost(c.GetChannelsRoute()+"/group", ArrayToJson(userIds)); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelFromJson(r.Body), BuildResponse(r) + } +} + +// GetChannel returns a channel based on the provided channel id string. +func (c *Client4) GetChannel(channelId, etag string) (*Channel, *Response) { + if r, err := c.DoApiGet(c.GetChannelRoute(channelId), etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelFromJson(r.Body), BuildResponse(r) + } +} + +// GetChannelStats returns statistics for a channel. +func (c *Client4) GetChannelStats(channelId string, etag string) (*ChannelStats, *Response) { + if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/stats", etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelStatsFromJson(r.Body), BuildResponse(r) + } +} + +// GetPinnedPosts gets a list of pinned posts. +func (c *Client4) GetPinnedPosts(channelId string, etag string) (*PostList, *Response) { + if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/pinned", etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return PostListFromJson(r.Body), BuildResponse(r) + } +} + +// GetPublicChannelsForTeam returns a list of public channels based on the provided team id string. +func (c *Client4) GetPublicChannelsForTeam(teamId string, page int, perPage int, etag string) ([]*Channel, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + if r, err := c.DoApiGet(c.GetChannelsForTeamRoute(teamId)+query, etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelSliceFromJson(r.Body), BuildResponse(r) + } +} + +// GetDeletedChannelsForTeam returns a list of public channels based on the provided team id string. 
+func (c *Client4) GetDeletedChannelsForTeam(teamId string, page int, perPage int, etag string) ([]*Channel, *Response) { + query := fmt.Sprintf("/deleted?page=%v&per_page=%v", page, perPage) + if r, err := c.DoApiGet(c.GetChannelsForTeamRoute(teamId)+query, etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelSliceFromJson(r.Body), BuildResponse(r) + } +} + +// GetPublicChannelsByIdsForTeam returns a list of public channels based on provided team id string +func (c *Client4) GetPublicChannelsByIdsForTeam(teamId string, channelIds []string) ([]*Channel, *Response) { + if r, err := c.DoApiPost(c.GetChannelsForTeamRoute(teamId)+"/ids", ArrayToJson(channelIds)); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelSliceFromJson(r.Body), BuildResponse(r) + } +} + +// GetChannelsForTeamForUser returns a list channels of on a team for a user. +func (c *Client4) GetChannelsForTeamForUser(teamId, userId, etag string) ([]*Channel, *Response) { + if r, err := c.DoApiGet(c.GetUserRoute(userId)+c.GetTeamRoute(teamId)+"/channels", etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelSliceFromJson(r.Body), BuildResponse(r) + } +} + +// SearchChannels returns the channels on a team matching the provided search term. +func (c *Client4) SearchChannels(teamId string, search *ChannelSearch) ([]*Channel, *Response) { + if r, err := c.DoApiPost(c.GetChannelsForTeamRoute(teamId)+"/search", search.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelSliceFromJson(r.Body), BuildResponse(r) + } +} + +// DeleteChannel deletes channel based on the provided channel id string. +func (c *Client4) DeleteChannel(channelId string) (bool, *Response) { + if r, err := c.DoApiDelete(c.GetChannelRoute(channelId)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// GetChannelByName returns a channel based on the provided channel name and team id strings. +func (c *Client4) GetChannelByName(channelName, teamId string, etag string) (*Channel, *Response) { + if r, err := c.DoApiGet(c.GetChannelByNameRoute(channelName, teamId), etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelFromJson(r.Body), BuildResponse(r) + } +} + +// GetChannelByNameForTeamName returns a channel based on the provided channel name and team name strings. +func (c *Client4) GetChannelByNameForTeamName(channelName, teamName string, etag string) (*Channel, *Response) { + if r, err := c.DoApiGet(c.GetChannelByNameForTeamNameRoute(channelName, teamName), etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelFromJson(r.Body), BuildResponse(r) + } +} + +// GetChannelMembers gets a page of channel members. +func (c *Client4) GetChannelMembers(channelId string, page, perPage int, etag string) (*ChannelMembers, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + if r, err := c.DoApiGet(c.GetChannelMembersRoute(channelId)+query, etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelMembersFromJson(r.Body), BuildResponse(r) + } +} + +// GetChannelMembersByIds gets the channel members in a channel for a list of user ids. 
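A sketch of creating an open channel and looking it up again by name with CreateChannel and GetChannelByName above; the Channel field names and the CHANNEL_OPEN constant are assumptions.

func createAndFetchChannel(client *model.Client4, teamId string) (*model.Channel, error) {
	created, resp := client.CreateChannel(&model.Channel{
		TeamId:      teamId,
		Name:        "release-planning", // URL-safe name (assumed field)
		DisplayName: "Release Planning", // assumed field
		Type:        model.CHANNEL_OPEN, // public-channel constant (assumed)
	})
	if resp.Error != nil {
		return nil, fmt.Errorf("creating channel failed: %v", resp.Error)
	}

	// Fetch it back by name to confirm it is visible on the team.
	channel, resp := client.GetChannelByName(created.Name, teamId, "")
	if resp.Error != nil {
		return nil, fmt.Errorf("channel lookup failed: %v", resp.Error)
	}
	return channel, nil
}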
+func (c *Client4) GetChannelMembersByIds(channelId string, userIds []string) (*ChannelMembers, *Response) { + if r, err := c.DoApiPost(c.GetChannelMembersRoute(channelId)+"/ids", ArrayToJson(userIds)); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelMembersFromJson(r.Body), BuildResponse(r) + + } +} + +// GetChannelMember gets a channel member. +func (c *Client4) GetChannelMember(channelId, userId, etag string) (*ChannelMember, *Response) { + if r, err := c.DoApiGet(c.GetChannelMemberRoute(channelId, userId), etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelMemberFromJson(r.Body), BuildResponse(r) + } +} + +// GetChannelMembersForUser gets all the channel members for a user on a team. +func (c *Client4) GetChannelMembersForUser(userId, teamId, etag string) (*ChannelMembers, *Response) { + if r, err := c.DoApiGet(fmt.Sprintf(c.GetUserRoute(userId)+"/teams/%v/channels/members", teamId), etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelMembersFromJson(r.Body), BuildResponse(r) + } +} + +// ViewChannel performs a view action for a user. Synonymous with switching channels or marking channels as read by a user. +func (c *Client4) ViewChannel(userId string, view *ChannelView) (*ChannelViewResponse, *Response) { + url := fmt.Sprintf(c.GetChannelsRoute()+"/members/%v/view", userId) + if r, err := c.DoApiPost(url, view.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelViewResponseFromJson(r.Body), BuildResponse(r) + } +} + +// GetChannelUnread will return a ChannelUnread object that contains the number of +// unread messages and mentions for a user. +func (c *Client4) GetChannelUnread(channelId, userId string) (*ChannelUnread, *Response) { + if r, err := c.DoApiGet(c.GetUserRoute(userId)+c.GetChannelRoute(channelId)+"/unread", ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelUnreadFromJson(r.Body), BuildResponse(r) + } +} + +// UpdateChannelRoles will update the roles on a channel for a user. +func (c *Client4) UpdateChannelRoles(channelId, userId, roles string) (bool, *Response) { + requestBody := map[string]string{"roles": roles} + if r, err := c.DoApiPut(c.GetChannelMemberRoute(channelId, userId)+"/roles", MapToJson(requestBody)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// UpdateChannelNotifyProps will update the notification properties on a channel for a user. +func (c *Client4) UpdateChannelNotifyProps(channelId, userId string, props map[string]string) (bool, *Response) { + if r, err := c.DoApiPut(c.GetChannelMemberRoute(channelId, userId)+"/notify_props", MapToJson(props)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// AddChannelMember adds user to channel and return a channel member. 
+func (c *Client4) AddChannelMember(channelId, userId string) (*ChannelMember, *Response) { + requestBody := map[string]string{"user_id": userId} + if r, err := c.DoApiPost(c.GetChannelMembersRoute(channelId)+"", MapToJson(requestBody)); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelMemberFromJson(r.Body), BuildResponse(r) + } +} + +// AddChannelMemberWithRootId adds user to channel and return a channel member. Post add to channel message has the postRootId. +func (c *Client4) AddChannelMemberWithRootId(channelId, userId, postRootId string) (*ChannelMember, *Response) { + requestBody := map[string]string{"user_id": userId, "post_root_id": postRootId} + if r, err := c.DoApiPost(c.GetChannelMembersRoute(channelId)+"", MapToJson(requestBody)); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelMemberFromJson(r.Body), BuildResponse(r) + } +} + +// RemoveUserFromChannel will delete the channel member object for a user, effectively removing the user from a channel. +func (c *Client4) RemoveUserFromChannel(channelId, userId string) (bool, *Response) { + if r, err := c.DoApiDelete(c.GetChannelMemberRoute(channelId, userId)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// Post Section + +// CreatePost creates a post based on the provided post struct. +func (c *Client4) CreatePost(post *Post) (*Post, *Response) { + if r, err := c.DoApiPost(c.GetPostsRoute(), post.ToUnsanitizedJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return PostFromJson(r.Body), BuildResponse(r) + } +} + +// UpdatePost updates a post based on the provided post struct. +func (c *Client4) UpdatePost(postId string, post *Post) (*Post, *Response) { + if r, err := c.DoApiPut(c.GetPostRoute(postId), post.ToUnsanitizedJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return PostFromJson(r.Body), BuildResponse(r) + } +} + +// PatchPost partially updates a post. Any missing fields are not updated. +func (c *Client4) PatchPost(postId string, patch *PostPatch) (*Post, *Response) { + if r, err := c.DoApiPut(c.GetPostRoute(postId)+"/patch", patch.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return PostFromJson(r.Body), BuildResponse(r) + } +} + +// PinPost pin a post based on provided post id string. +func (c *Client4) PinPost(postId string) (bool, *Response) { + if r, err := c.DoApiPost(c.GetPostRoute(postId)+"/pin", ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// UnpinPost unpin a post based on provided post id string. +func (c *Client4) UnpinPost(postId string) (bool, *Response) { + if r, err := c.DoApiPost(c.GetPostRoute(postId)+"/unpin", ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// GetPost gets a single post. +func (c *Client4) GetPost(postId string, etag string) (*Post, *Response) { + if r, err := c.DoApiGet(c.GetPostRoute(postId), etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return PostFromJson(r.Body), BuildResponse(r) + } +} + +// DeletePost deletes a post from the provided post id string. 
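A sketch of publishing a message with CreatePost above; the ChannelId and Message fields on Post are assumptions about the surrounding model package.

func postMessage(client *model.Client4, channelId, message string) (*model.Post, error) {
	post, resp := client.CreatePost(&model.Post{
		ChannelId: channelId, // assumed field
		Message:   message,   // assumed field
	})
	if resp.Error != nil {
		return nil, fmt.Errorf("creating post failed: %v", resp.Error)
	}
	return post, nil
}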
+func (c *Client4) DeletePost(postId string) (bool, *Response) { + if r, err := c.DoApiDelete(c.GetPostRoute(postId)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// GetPostThread gets a post with all the other posts in the same thread. +func (c *Client4) GetPostThread(postId string, etag string) (*PostList, *Response) { + if r, err := c.DoApiGet(c.GetPostRoute(postId)+"/thread", etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return PostListFromJson(r.Body), BuildResponse(r) + } +} + +// GetPostsForChannel gets a page of posts with an array for ordering for a channel. +func (c *Client4) GetPostsForChannel(channelId string, page, perPage int, etag string) (*PostList, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/posts"+query, etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return PostListFromJson(r.Body), BuildResponse(r) + } +} + +// GetFlaggedPostsForUser returns flagged posts of a user based on user id string. +func (c *Client4) GetFlaggedPostsForUser(userId string, page int, perPage int) (*PostList, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/posts/flagged"+query, ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return PostListFromJson(r.Body), BuildResponse(r) + } +} + +// GetFlaggedPostsForUserInTeam returns flagged posts in team of a user based on user id string. +func (c *Client4) GetFlaggedPostsForUserInTeam(userId string, teamId string, page int, perPage int) (*PostList, *Response) { + if len(teamId) == 0 || len(teamId) != 26 { + return nil, &Response{StatusCode: http.StatusBadRequest, Error: NewAppError("GetFlaggedPostsForUserInTeam", "model.client.get_flagged_posts_in_team.missing_parameter.app_error", nil, "", http.StatusBadRequest)} + } + + query := fmt.Sprintf("?team_id=%v&page=%v&per_page=%v", teamId, page, perPage) + if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/posts/flagged"+query, ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return PostListFromJson(r.Body), BuildResponse(r) + } +} + +// GetFlaggedPostsForUserInChannel returns flagged posts in channel of a user based on user id string. +func (c *Client4) GetFlaggedPostsForUserInChannel(userId string, channelId string, page int, perPage int) (*PostList, *Response) { + if len(channelId) == 0 || len(channelId) != 26 { + return nil, &Response{StatusCode: http.StatusBadRequest, Error: NewAppError("GetFlaggedPostsForUserInChannel", "model.client.get_flagged_posts_in_channel.missing_parameter.app_error", nil, "", http.StatusBadRequest)} + } + + query := fmt.Sprintf("?channel_id=%v&page=%v&per_page=%v", channelId, page, perPage) + if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/posts/flagged"+query, ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return PostListFromJson(r.Body), BuildResponse(r) + } +} + +// GetPostsSince gets posts created after a specified time as Unix time in milliseconds. 
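GetPostsSince below takes its cutoff as Unix milliseconds; a polling sketch that converts a time.Time accordingly. The PostList Order/Posts layout is an assumption, and the time package is assumed to be imported alongside the earlier sketch's imports.

func postsSince(client *model.Client4, channelId string, cutoff time.Time) ([]*model.Post, error) {
	// The endpoint expects milliseconds since the Unix epoch.
	list, resp := client.GetPostsSince(channelId, cutoff.UnixNano()/int64(time.Millisecond))
	if resp.Error != nil {
		return nil, fmt.Errorf("fetching posts failed: %v", resp.Error)
	}

	var posts []*model.Post
	for _, id := range list.Order { // Order/Posts layout assumed
		posts = append(posts, list.Posts[id])
	}
	return posts, nil
}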
+func (c *Client4) GetPostsSince(channelId string, time int64) (*PostList, *Response) { + query := fmt.Sprintf("?since=%v", time) + if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/posts"+query, ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return PostListFromJson(r.Body), BuildResponse(r) + } +} + +// GetPostsAfter gets a page of posts that were posted after the post provided. +func (c *Client4) GetPostsAfter(channelId, postId string, page, perPage int, etag string) (*PostList, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v&after=%v", page, perPage, postId) + if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/posts"+query, etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return PostListFromJson(r.Body), BuildResponse(r) + } +} + +// GetPostsBefore gets a page of posts that were posted before the post provided. +func (c *Client4) GetPostsBefore(channelId, postId string, page, perPage int, etag string) (*PostList, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v&before=%v", page, perPage, postId) + if r, err := c.DoApiGet(c.GetChannelRoute(channelId)+"/posts"+query, etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return PostListFromJson(r.Body), BuildResponse(r) + } +} + +// SearchPosts returns any posts with matching terms string. +func (c *Client4) SearchPosts(teamId string, terms string, isOrSearch bool) (*PostList, *Response) { + requestBody := map[string]interface{}{"terms": terms, "is_or_search": isOrSearch} + if r, err := c.DoApiPost(c.GetTeamRoute(teamId)+"/posts/search", StringInterfaceToJson(requestBody)); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return PostListFromJson(r.Body), BuildResponse(r) + } +} + +// DoPostAction performs a post action. +func (c *Client4) DoPostAction(postId, actionId string) (bool, *Response) { + if r, err := c.DoApiPost(c.GetPostRoute(postId)+"/actions/"+actionId, ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// File Section + +// UploadFile will upload a file to a channel, to be later attached to a post. 
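+// A usage sketch (illustrative only; assumes an authenticated Client4 named
+// client, a placeholder file path and channel id, and the FileInfos/FileIds
+// fields of the model's file and post types):
+//
+//    data, err := ioutil.ReadFile("./report.pdf")
+//    if err == nil {
+//        fileResp, resp := client.UploadFile(data, "channel-id", "report.pdf")
+//        if resp.Error == nil {
+//            client.CreatePost(&Post{
+//                ChannelId: "channel-id",
+//                Message:   "see attachment",
+//                FileIds:   []string{fileResp.FileInfos[0].Id},
+//            })
+//        }
+//    }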
+func (c *Client4) UploadFile(data []byte, channelId string, filename string) (*FileUploadResponse, *Response) { + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + if part, err := writer.CreateFormFile("files", filename); err != nil { + return nil, &Response{Error: NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.file.app_error", nil, err.Error(), http.StatusBadRequest)} + } else if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil { + return nil, &Response{Error: NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.file.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + if part, err := writer.CreateFormField("channel_id"); err != nil { + return nil, &Response{Error: NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.channel_id.app_error", nil, err.Error(), http.StatusBadRequest)} + } else if _, err = io.Copy(part, strings.NewReader(channelId)); err != nil { + return nil, &Response{Error: NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.channel_id.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + if err := writer.Close(); err != nil { + return nil, &Response{Error: NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.writer.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + return c.DoUploadFile(c.GetFilesRoute(), body.Bytes(), writer.FormDataContentType()) +} + +// GetFile gets the bytes for a file by id. +func (c *Client4) GetFile(fileId string) ([]byte, *Response) { + if r, err := c.DoApiGet(c.GetFileRoute(fileId), ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + + if data, err := ioutil.ReadAll(r.Body); err != nil { + return nil, BuildErrorResponse(r, NewAppError("GetFile", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode)) + } else { + return data, BuildResponse(r) + } + } +} + +// DownloadFile gets the bytes for a file by id, optionally adding headers to force the browser to download it +func (c *Client4) DownloadFile(fileId string, download bool) ([]byte, *Response) { + if r, err := c.DoApiGet(c.GetFileRoute(fileId)+fmt.Sprintf("?download=%v", download), ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + + if data, err := ioutil.ReadAll(r.Body); err != nil { + return nil, BuildErrorResponse(r, NewAppError("DownloadFile", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode)) + } else { + return data, BuildResponse(r) + } + } +} + +// GetFileThumbnail gets the bytes for a file by id. +func (c *Client4) GetFileThumbnail(fileId string) ([]byte, *Response) { + if r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/thumbnail", ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + + if data, err := ioutil.ReadAll(r.Body); err != nil { + return nil, BuildErrorResponse(r, NewAppError("GetFileThumbnail", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode)) + } else { + return data, BuildResponse(r) + } + } +} + +// DownloadFileThumbnail gets the bytes for a file by id, optionally adding headers to force the browser to download it. 
+func (c *Client4) DownloadFileThumbnail(fileId string, download bool) ([]byte, *Response) { + if r, err := c.DoApiGet(c.GetFileRoute(fileId)+fmt.Sprintf("/thumbnail?download=%v", download), ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + + if data, err := ioutil.ReadAll(r.Body); err != nil { + return nil, BuildErrorResponse(r, NewAppError("DownloadFileThumbnail", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode)) + } else { + return data, BuildResponse(r) + } + } +} + +// GetFileLink gets the public link of a file by id. +func (c *Client4) GetFileLink(fileId string) (string, *Response) { + if r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/link", ""); err != nil { + return "", BuildErrorResponse(r, err) + } else { + defer closeBody(r) + + return MapFromJson(r.Body)["link"], BuildResponse(r) + } +} + +// GetFilePreview gets the bytes for a file by id. +func (c *Client4) GetFilePreview(fileId string) ([]byte, *Response) { + if r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/preview", ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + + if data, err := ioutil.ReadAll(r.Body); err != nil { + return nil, BuildErrorResponse(r, NewAppError("GetFilePreview", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode)) + } else { + return data, BuildResponse(r) + } + } +} + +// DownloadFilePreview gets the bytes for a file by id. +func (c *Client4) DownloadFilePreview(fileId string, download bool) ([]byte, *Response) { + if r, err := c.DoApiGet(c.GetFileRoute(fileId)+fmt.Sprintf("/preview?download=%v", download), ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + + if data, err := ioutil.ReadAll(r.Body); err != nil { + return nil, BuildErrorResponse(r, NewAppError("DownloadFilePreview", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode)) + } else { + return data, BuildResponse(r) + } + } +} + +// GetFileInfo gets all the file info objects. +func (c *Client4) GetFileInfo(fileId string) (*FileInfo, *Response) { + if r, err := c.DoApiGet(c.GetFileRoute(fileId)+"/info", ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return FileInfoFromJson(r.Body), BuildResponse(r) + } +} + +// GetFileInfosForPost gets all the file info objects attached to a post. +func (c *Client4) GetFileInfosForPost(postId string, etag string) ([]*FileInfo, *Response) { + if r, err := c.DoApiGet(c.GetPostRoute(postId)+"/files/info", etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return FileInfosFromJson(r.Body), BuildResponse(r) + } +} + +// General/System Section + +// GetPing will return ok if the running goRoutines are below the threshold and unhealthy for above. +func (c *Client4) GetPing() (string, *Response) { + if r, err := c.DoApiGet(c.GetSystemRoute()+"/ping", ""); r != nil && r.StatusCode == 500 { + defer r.Body.Close() + return "unhealthy", BuildErrorResponse(r, err) + } else if err != nil { + return "", BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return MapFromJson(r.Body)["status"], BuildResponse(r) + } +} + +// TestEmail will attempt to connect to the configured SMTP server. 
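+// A usage sketch (illustrative only; assumes a Client4 named client that is
+// authenticated as a system administrator):
+//
+//    if ok, resp := client.TestEmail(); resp.Error != nil {
+//        fmt.Println("SMTP test failed:", resp.Error.Error())
+//    } else {
+//        fmt.Println("SMTP reachable:", ok)
+//    }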
+func (c *Client4) TestEmail() (bool, *Response) { + if r, err := c.DoApiPost(c.GetTestEmailRoute(), ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// GetConfig will retrieve the server config with some sanitized items. +func (c *Client4) GetConfig() (*Config, *Response) { + if r, err := c.DoApiGet(c.GetConfigRoute(), ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ConfigFromJson(r.Body), BuildResponse(r) + } +} + +// ReloadConfig will reload the server configuration. +func (c *Client4) ReloadConfig() (bool, *Response) { + if r, err := c.DoApiPost(c.GetConfigRoute()+"/reload", ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// GetOldClientConfig will retrieve the parts of the server configuration needed by the +// client, formatted in the old format. +func (c *Client4) GetOldClientConfig(etag string) (map[string]string, *Response) { + if r, err := c.DoApiGet(c.GetConfigRoute()+"/client?format=old", etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return MapFromJson(r.Body), BuildResponse(r) + } +} + +// GetOldClientLicense will retrieve the parts of the server license needed by the +// client, formatted in the old format. +func (c *Client4) GetOldClientLicense(etag string) (map[string]string, *Response) { + if r, err := c.DoApiGet(c.GetLicenseRoute()+"/client?format=old", etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return MapFromJson(r.Body), BuildResponse(r) + } +} + +// DatabaseRecycle will recycle the connections. Discard current connection and get new one. +func (c *Client4) DatabaseRecycle() (bool, *Response) { + if r, err := c.DoApiPost(c.GetDatabaseRoute()+"/recycle", ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// InvalidateCaches will purge the cache and can affect the performance while is cleaning. +func (c *Client4) InvalidateCaches() (bool, *Response) { + if r, err := c.DoApiPost(c.GetCacheRoute()+"/invalidate", ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// UpdateConfig will update the server configuration. +func (c *Client4) UpdateConfig(config *Config) (*Config, *Response) { + if r, err := c.DoApiPut(c.GetConfigRoute(), config.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ConfigFromJson(r.Body), BuildResponse(r) + } +} + +// UploadLicenseFile will add a license file to the system. 
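+// A usage sketch (illustrative only; the license path is a placeholder and the
+// Client4 named client is assumed to be authenticated as a system administrator):
+//
+//    data, err := ioutil.ReadFile("./enterprise.mattermost-license")
+//    if err == nil {
+//        ok, resp := client.UploadLicenseFile(data)
+//        fmt.Println(ok, resp.StatusCode)
+//    }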
+func (c *Client4) UploadLicenseFile(data []byte) (bool, *Response) { + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + if part, err := writer.CreateFormFile("license", "test-license.mattermost-license"); err != nil { + return false, &Response{Error: NewAppError("UploadLicenseFile", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)} + } else if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil { + return false, &Response{Error: NewAppError("UploadLicenseFile", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + if err := writer.Close(); err != nil { + return false, &Response{Error: NewAppError("UploadLicenseFile", "model.client.set_profile_user.writer.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + rq, _ := http.NewRequest("POST", c.ApiUrl+c.GetLicenseRoute(), bytes.NewReader(body.Bytes())) + rq.Header.Set("Content-Type", writer.FormDataContentType()) + rq.Close = true + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil || rp == nil { + return false, &Response{StatusCode: http.StatusForbidden, Error: NewAppError(c.GetLicenseRoute(), "model.client.connecting.app_error", nil, err.Error(), http.StatusForbidden)} + } else { + defer closeBody(rp) + + if rp.StatusCode >= 300 { + return false, BuildErrorResponse(rp, AppErrorFromJson(rp.Body)) + } else { + return CheckStatusOK(rp), BuildResponse(rp) + } + } +} + +// RemoveLicenseFile will remove the server license it exists. Note that this will +// disable all enterprise features. +func (c *Client4) RemoveLicenseFile() (bool, *Response) { + if r, err := c.DoApiDelete(c.GetLicenseRoute()); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// GetAnalyticsOld will retrieve analytics using the old format. New format is not +// available but the "/analytics" endpoint is reserved for it. The "name" argument is optional +// and defaults to "standard". The "teamId" argument is optional and will limit results +// to a specific team. +func (c *Client4) GetAnalyticsOld(name, teamId string) (AnalyticsRows, *Response) { + query := fmt.Sprintf("?name=%v&teamId=%v", name, teamId) + if r, err := c.DoApiGet(c.GetAnalyticsRoute()+"/old"+query, ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return AnalyticsRowsFromJson(r.Body), BuildResponse(r) + } +} + +// Webhooks Section + +// CreateIncomingWebhook creates an incoming webhook for a channel. +func (c *Client4) CreateIncomingWebhook(hook *IncomingWebhook) (*IncomingWebhook, *Response) { + if r, err := c.DoApiPost(c.GetIncomingWebhooksRoute(), hook.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return IncomingWebhookFromJson(r.Body), BuildResponse(r) + } +} + +// UpdateIncomingWebhook updates an incoming webhook for a channel. +func (c *Client4) UpdateIncomingWebhook(hook *IncomingWebhook) (*IncomingWebhook, *Response) { + if r, err := c.DoApiPut(c.GetIncomingWebhookRoute(hook.Id), hook.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return IncomingWebhookFromJson(r.Body), BuildResponse(r) + } +} + +// GetIncomingWebhooks returns a page of incoming webhooks on the system. Page counting starts at 0. 
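+// A usage sketch (illustrative only; assumes an authenticated Client4 named
+// client) showing how the page/perPage arguments are typically walked until an
+// empty page is returned:
+//
+//    for page := 0; ; page++ {
+//        hooks, resp := client.GetIncomingWebhooks(page, 50, "")
+//        if resp.Error != nil || len(hooks) == 0 {
+//            break
+//        }
+//        for _, hook := range hooks {
+//            fmt.Println(hook.Id, hook.DisplayName)
+//        }
+//    }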
+func (c *Client4) GetIncomingWebhooks(page int, perPage int, etag string) ([]*IncomingWebhook, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + if r, err := c.DoApiGet(c.GetIncomingWebhooksRoute()+query, etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return IncomingWebhookListFromJson(r.Body), BuildResponse(r) + } +} + +// GetIncomingWebhooksForTeam returns a page of incoming webhooks for a team. Page counting starts at 0. +func (c *Client4) GetIncomingWebhooksForTeam(teamId string, page int, perPage int, etag string) ([]*IncomingWebhook, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v&team_id=%v", page, perPage, teamId) + if r, err := c.DoApiGet(c.GetIncomingWebhooksRoute()+query, etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return IncomingWebhookListFromJson(r.Body), BuildResponse(r) + } +} + +// GetIncomingWebhook returns an Incoming webhook given the hook ID +func (c *Client4) GetIncomingWebhook(hookID string, etag string) (*IncomingWebhook, *Response) { + if r, err := c.DoApiGet(c.GetIncomingWebhookRoute(hookID), etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return IncomingWebhookFromJson(r.Body), BuildResponse(r) + } +} + +// DeleteIncomingWebhook deletes and Incoming Webhook given the hook ID +func (c *Client4) DeleteIncomingWebhook(hookID string) (bool, *Response) { + if r, err := c.DoApiDelete(c.GetIncomingWebhookRoute(hookID)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// CreateOutgoingWebhook creates an outgoing webhook for a team or channel. +func (c *Client4) CreateOutgoingWebhook(hook *OutgoingWebhook) (*OutgoingWebhook, *Response) { + if r, err := c.DoApiPost(c.GetOutgoingWebhooksRoute(), hook.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return OutgoingWebhookFromJson(r.Body), BuildResponse(r) + } +} + +// UpdateOutgoingWebhook creates an outgoing webhook for a team or channel. +func (c *Client4) UpdateOutgoingWebhook(hook *OutgoingWebhook) (*OutgoingWebhook, *Response) { + if r, err := c.DoApiPut(c.GetOutgoingWebhookRoute(hook.Id), hook.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return OutgoingWebhookFromJson(r.Body), BuildResponse(r) + } +} + +// GetOutgoingWebhooks returns a page of outgoing webhooks on the system. Page counting starts at 0. +func (c *Client4) GetOutgoingWebhooks(page int, perPage int, etag string) ([]*OutgoingWebhook, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + if r, err := c.DoApiGet(c.GetOutgoingWebhooksRoute()+query, etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return OutgoingWebhookListFromJson(r.Body), BuildResponse(r) + } +} + +// GetOutgoingWebhook outgoing webhooks on the system requested by Hook Id. +func (c *Client4) GetOutgoingWebhook(hookId string) (*OutgoingWebhook, *Response) { + if r, err := c.DoApiGet(c.GetOutgoingWebhookRoute(hookId), ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return OutgoingWebhookFromJson(r.Body), BuildResponse(r) + } +} + +// GetOutgoingWebhooksForChannel returns a page of outgoing webhooks for a channel. Page counting starts at 0. 
+func (c *Client4) GetOutgoingWebhooksForChannel(channelId string, page int, perPage int, etag string) ([]*OutgoingWebhook, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v&channel_id=%v", page, perPage, channelId) + if r, err := c.DoApiGet(c.GetOutgoingWebhooksRoute()+query, etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return OutgoingWebhookListFromJson(r.Body), BuildResponse(r) + } +} + +// GetOutgoingWebhooksForTeam returns a page of outgoing webhooks for a team. Page counting starts at 0. +func (c *Client4) GetOutgoingWebhooksForTeam(teamId string, page int, perPage int, etag string) ([]*OutgoingWebhook, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v&team_id=%v", page, perPage, teamId) + if r, err := c.DoApiGet(c.GetOutgoingWebhooksRoute()+query, etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return OutgoingWebhookListFromJson(r.Body), BuildResponse(r) + } +} + +// RegenOutgoingHookToken regenerate the outgoing webhook token. +func (c *Client4) RegenOutgoingHookToken(hookId string) (*OutgoingWebhook, *Response) { + if r, err := c.DoApiPost(c.GetOutgoingWebhookRoute(hookId)+"/regen_token", ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return OutgoingWebhookFromJson(r.Body), BuildResponse(r) + } +} + +// DeleteOutgoingWebhook delete the outgoing webhook on the system requested by Hook Id. +func (c *Client4) DeleteOutgoingWebhook(hookId string) (bool, *Response) { + if r, err := c.DoApiDelete(c.GetOutgoingWebhookRoute(hookId)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// Preferences Section + +// GetPreferences returns the user's preferences. +func (c *Client4) GetPreferences(userId string) (Preferences, *Response) { + if r, err := c.DoApiGet(c.GetPreferencesRoute(userId), ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + preferences, _ := PreferencesFromJson(r.Body) + defer closeBody(r) + return preferences, BuildResponse(r) + } +} + +// UpdatePreferences saves the user's preferences. +func (c *Client4) UpdatePreferences(userId string, preferences *Preferences) (bool, *Response) { + if r, err := c.DoApiPut(c.GetPreferencesRoute(userId), preferences.ToJson()); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return true, BuildResponse(r) + } +} + +// DeletePreferences deletes the user's preferences. +func (c *Client4) DeletePreferences(userId string, preferences *Preferences) (bool, *Response) { + if r, err := c.DoApiPost(c.GetPreferencesRoute(userId)+"/delete", preferences.ToJson()); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return true, BuildResponse(r) + } +} + +// GetPreferencesByCategory returns the user's preferences from the provided category string. +func (c *Client4) GetPreferencesByCategory(userId string, category string) (Preferences, *Response) { + url := fmt.Sprintf(c.GetPreferencesRoute(userId)+"/%s", category) + if r, err := c.DoApiGet(url, ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + preferences, _ := PreferencesFromJson(r.Body) + defer closeBody(r) + return preferences, BuildResponse(r) + } +} + +// GetPreferenceByCategoryAndName returns the user's preferences from the provided category and preference name string. 
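+// A usage sketch (illustrative only; assumes an authenticated Client4 named
+// client, and the category and preference name shown are placeholder strings):
+//
+//    pref, resp := client.GetPreferenceByCategoryAndName("user-id", "some_category", "some_name")
+//    if resp.Error == nil {
+//        fmt.Println(pref.Value)
+//    }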
+func (c *Client4) GetPreferenceByCategoryAndName(userId string, category string, preferenceName string) (*Preference, *Response) { + url := fmt.Sprintf(c.GetPreferencesRoute(userId)+"/%s/name/%v", category, preferenceName) + if r, err := c.DoApiGet(url, ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return PreferenceFromJson(r.Body), BuildResponse(r) + } +} + +// SAML Section + +// GetSamlMetadata returns metadata for the SAML configuration. +func (c *Client4) GetSamlMetadata() (string, *Response) { + if r, err := c.DoApiGet(c.GetSamlRoute()+"/metadata", ""); err != nil { + return "", BuildErrorResponse(r, err) + } else { + defer closeBody(r) + buf := new(bytes.Buffer) + buf.ReadFrom(r.Body) + return buf.String(), BuildResponse(r) + } +} + +func samlFileToMultipart(data []byte, filename string) ([]byte, *multipart.Writer, error) { + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + if part, err := writer.CreateFormFile("certificate", filename); err != nil { + return nil, nil, err + } else if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil { + return nil, nil, err + } + + if err := writer.Close(); err != nil { + return nil, nil, err + } + + return body.Bytes(), writer, nil +} + +// UploadSamlIdpCertificate will upload an IDP certificate for SAML and set the config to use it. +func (c *Client4) UploadSamlIdpCertificate(data []byte, filename string) (bool, *Response) { + body, writer, err := samlFileToMultipart(data, filename) + if err != nil { + return false, &Response{Error: NewAppError("UploadSamlIdpCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + _, resp := c.DoUploadFile(c.GetSamlRoute()+"/certificate/idp", body, writer.FormDataContentType()) + return resp.Error == nil, resp +} + +// UploadSamlPublicCertificate will upload a public certificate for SAML and set the config to use it. +func (c *Client4) UploadSamlPublicCertificate(data []byte, filename string) (bool, *Response) { + body, writer, err := samlFileToMultipart(data, filename) + if err != nil { + return false, &Response{Error: NewAppError("UploadSamlPublicCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + _, resp := c.DoUploadFile(c.GetSamlRoute()+"/certificate/public", body, writer.FormDataContentType()) + return resp.Error == nil, resp +} + +// UploadSamlPrivateCertificate will upload a private key for SAML and set the config to use it. +func (c *Client4) UploadSamlPrivateCertificate(data []byte, filename string) (bool, *Response) { + body, writer, err := samlFileToMultipart(data, filename) + if err != nil { + return false, &Response{Error: NewAppError("UploadSamlPrivateCertificate", "model.client.upload_saml_cert.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + _, resp := c.DoUploadFile(c.GetSamlRoute()+"/certificate/private", body, writer.FormDataContentType()) + return resp.Error == nil, resp +} + +// DeleteSamlIdpCertificate deletes the SAML IDP certificate from the server and updates the config to not use it and disable SAML. 
+func (c *Client4) DeleteSamlIdpCertificate() (bool, *Response) {
+	if r, err := c.DoApiDelete(c.GetSamlRoute() + "/certificate/idp"); err != nil {
+		return false, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return CheckStatusOK(r), BuildResponse(r)
+	}
+}
+
+// DeleteSamlPublicCertificate deletes the SAML public certificate from the server and updates the config to not use it.
+func (c *Client4) DeleteSamlPublicCertificate() (bool, *Response) {
+	if r, err := c.DoApiDelete(c.GetSamlRoute() + "/certificate/public"); err != nil {
+		return false, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return CheckStatusOK(r), BuildResponse(r)
+	}
+}
+
+// DeleteSamlPrivateCertificate deletes the SAML private key from the server and updates the config to not use it.
+func (c *Client4) DeleteSamlPrivateCertificate() (bool, *Response) {
+	if r, err := c.DoApiDelete(c.GetSamlRoute() + "/certificate/private"); err != nil {
+		return false, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return CheckStatusOK(r), BuildResponse(r)
+	}
+}
+
+// GetSamlCertificateStatus returns the status of the uploaded SAML certificates.
+func (c *Client4) GetSamlCertificateStatus() (*SamlCertificateStatus, *Response) {
+	if r, err := c.DoApiGet(c.GetSamlRoute()+"/certificate/status", ""); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return SamlCertificateStatusFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// Compliance Section
+
+// CreateComplianceReport creates a compliance report.
+func (c *Client4) CreateComplianceReport(report *Compliance) (*Compliance, *Response) {
+	if r, err := c.DoApiPost(c.GetComplianceReportsRoute(), report.ToJson()); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return ComplianceFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// GetComplianceReports returns a page of compliance reports.
+func (c *Client4) GetComplianceReports(page, perPage int) (Compliances, *Response) {
+	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
+	if r, err := c.DoApiGet(c.GetComplianceReportsRoute()+query, ""); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return CompliancesFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// GetComplianceReport returns a compliance report.
+func (c *Client4) GetComplianceReport(reportId string) (*Compliance, *Response) {
+	if r, err := c.DoApiGet(c.GetComplianceReportRoute(reportId), ""); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return ComplianceFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// DownloadComplianceReport returns a full compliance report as a file.
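+// A usage sketch (illustrative only; the report id and output path are
+// placeholders, and a Client4 named client authenticated as a system
+// administrator is assumed):
+//
+//    data, resp := client.DownloadComplianceReport("report-id")
+//    if resp.Error == nil {
+//        ioutil.WriteFile("./compliance-export.zip", data, 0600)
+//    }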
+func (c *Client4) DownloadComplianceReport(reportId string) ([]byte, *Response) { + var rq *http.Request + rq, _ = http.NewRequest("GET", c.ApiUrl+c.GetComplianceReportRoute(reportId), nil) + rq.Close = true + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, "BEARER "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil || rp == nil { + return nil, &Response{Error: NewAppError("DownloadComplianceReport", "model.client.connecting.app_error", nil, err.Error(), http.StatusBadRequest)} + } else { + defer closeBody(rp) + + if rp.StatusCode >= 300 { + return nil, BuildErrorResponse(rp, AppErrorFromJson(rp.Body)) + } else if data, err := ioutil.ReadAll(rp.Body); err != nil { + return nil, BuildErrorResponse(rp, NewAppError("DownloadComplianceReport", "model.client.read_file.app_error", nil, err.Error(), rp.StatusCode)) + } else { + return data, BuildResponse(rp) + } + } +} + +// Cluster Section + +// GetClusterStatus returns the status of all the configured cluster nodes. +func (c *Client4) GetClusterStatus() ([]*ClusterInfo, *Response) { + if r, err := c.DoApiGet(c.GetClusterRoute()+"/status", ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ClusterInfosFromJson(r.Body), BuildResponse(r) + } +} + +// LDAP Section + +// SyncLdap will force a sync with the configured LDAP server. +func (c *Client4) SyncLdap() (bool, *Response) { + if r, err := c.DoApiPost(c.GetLdapRoute()+"/sync", ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// TestLdap will attempt to connect to the configured LDAP server and return OK if configured +// correctly. +func (c *Client4) TestLdap() (bool, *Response) { + if r, err := c.DoApiPost(c.GetLdapRoute()+"/test", ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// Audits Section + +// GetAudits returns a list of audits for the whole system. +func (c *Client4) GetAudits(page int, perPage int, etag string) (Audits, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + if r, err := c.DoApiGet("/audits"+query, etag); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return AuditsFromJson(r.Body), BuildResponse(r) + } +} + +// Brand Section + +// GetBrandImage retrieves the previously uploaded brand image. +func (c *Client4) GetBrandImage() ([]byte, *Response) { + if r, err := c.DoApiGet(c.GetBrandRoute()+"/image", ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + + if r.StatusCode >= 300 { + return nil, BuildErrorResponse(r, AppErrorFromJson(r.Body)) + } else if data, err := ioutil.ReadAll(r.Body); err != nil { + return nil, BuildErrorResponse(r, NewAppError("GetBrandImage", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode)) + } else { + return data, BuildResponse(r) + } + } +} + +// UploadBrandImage sets the brand image for the system. 
+func (c *Client4) UploadBrandImage(data []byte) (bool, *Response) { + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + if part, err := writer.CreateFormFile("image", "brand.png"); err != nil { + return false, &Response{Error: NewAppError("UploadBrandImage", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)} + } else if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil { + return false, &Response{Error: NewAppError("UploadBrandImage", "model.client.set_profile_user.no_file.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + if err := writer.Close(); err != nil { + return false, &Response{Error: NewAppError("UploadBrandImage", "model.client.set_profile_user.writer.app_error", nil, err.Error(), http.StatusBadRequest)} + } + + rq, _ := http.NewRequest("POST", c.ApiUrl+c.GetBrandRoute()+"/image", bytes.NewReader(body.Bytes())) + rq.Header.Set("Content-Type", writer.FormDataContentType()) + rq.Close = true + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil || rp == nil { + return false, &Response{StatusCode: http.StatusForbidden, Error: NewAppError(c.GetBrandRoute()+"/image", "model.client.connecting.app_error", nil, err.Error(), http.StatusForbidden)} + } else { + defer closeBody(rp) + + if rp.StatusCode >= 300 { + return false, BuildErrorResponse(rp, AppErrorFromJson(rp.Body)) + } else { + return CheckStatusOK(rp), BuildResponse(rp) + } + } +} + +// Logs Section + +// GetLogs page of logs as a string array. +func (c *Client4) GetLogs(page, perPage int) ([]string, *Response) { + query := fmt.Sprintf("?page=%v&logs_per_page=%v", page, perPage) + if r, err := c.DoApiGet("/logs"+query, ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ArrayFromJson(r.Body), BuildResponse(r) + } +} + +// PostLog is a convenience Web Service call so clients can log messages into +// the server-side logs. For example we typically log javascript error messages +// into the server-side. It returns the log message if the logging was successful. +func (c *Client4) PostLog(message map[string]string) (map[string]string, *Response) { + if r, err := c.DoApiPost("/logs", MapToJson(message)); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return MapFromJson(r.Body), BuildResponse(r) + } +} + +// OAuth Section + +// CreateOAuthApp will register a new OAuth 2.0 client application with Mattermost acting as an OAuth 2.0 service provider. +func (c *Client4) CreateOAuthApp(app *OAuthApp) (*OAuthApp, *Response) { + if r, err := c.DoApiPost(c.GetOAuthAppsRoute(), app.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return OAuthAppFromJson(r.Body), BuildResponse(r) + } +} + +// UpdateOAuthApp +func (c *Client4) UpdateOAuthApp(app *OAuthApp) (*OAuthApp, *Response) { + if r, err := c.DoApiPut(c.GetOAuthAppRoute(app.Id), app.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return OAuthAppFromJson(r.Body), BuildResponse(r) + } +} + +// GetOAuthApps gets a page of registered OAuth 2.0 client applications with Mattermost acting as an OAuth 2.0 service provider. 
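+// A usage sketch (illustrative only; assumes a Client4 named client with
+// permission to manage OAuth 2.0 applications):
+//
+//    apps, resp := client.GetOAuthApps(0, 100)
+//    if resp.Error == nil {
+//        for _, app := range apps {
+//            fmt.Println(app.Id, app.Name)
+//        }
+//    }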
+func (c *Client4) GetOAuthApps(page, perPage int) ([]*OAuthApp, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + if r, err := c.DoApiGet(c.GetOAuthAppsRoute()+query, ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return OAuthAppListFromJson(r.Body), BuildResponse(r) + } +} + +// GetOAuthApp gets a registered OAuth 2.0 client application with Mattermost acting as an OAuth 2.0 service provider. +func (c *Client4) GetOAuthApp(appId string) (*OAuthApp, *Response) { + if r, err := c.DoApiGet(c.GetOAuthAppRoute(appId), ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return OAuthAppFromJson(r.Body), BuildResponse(r) + } +} + +// GetOAuthAppInfo gets a sanitized version of a registered OAuth 2.0 client application with Mattermost acting as an OAuth 2.0 service provider. +func (c *Client4) GetOAuthAppInfo(appId string) (*OAuthApp, *Response) { + if r, err := c.DoApiGet(c.GetOAuthAppRoute(appId)+"/info", ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return OAuthAppFromJson(r.Body), BuildResponse(r) + } +} + +// DeleteOAuthApp deletes a registered OAuth 2.0 client application. +func (c *Client4) DeleteOAuthApp(appId string) (bool, *Response) { + if r, err := c.DoApiDelete(c.GetOAuthAppRoute(appId)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// RegenerateOAuthAppSecret regenerates the client secret for a registered OAuth 2.0 client application. +func (c *Client4) RegenerateOAuthAppSecret(appId string) (*OAuthApp, *Response) { + if r, err := c.DoApiPost(c.GetOAuthAppRoute(appId)+"/regen_secret", ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return OAuthAppFromJson(r.Body), BuildResponse(r) + } +} + +// GetAuthorizedOAuthAppsForUser gets a page of OAuth 2.0 client applications the user has authorized to use access their account. +func (c *Client4) GetAuthorizedOAuthAppsForUser(userId string, page, perPage int) ([]*OAuthApp, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + if r, err := c.DoApiGet(c.GetUserRoute(userId)+"/oauth/apps/authorized"+query, ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return OAuthAppListFromJson(r.Body), BuildResponse(r) + } +} + +// AuthorizeOAuthApp will authorize an OAuth 2.0 client application to access a user's account and provide a redirect link to follow. +func (c *Client4) AuthorizeOAuthApp(authRequest *AuthorizeRequest) (string, *Response) { + if r, err := c.DoApiRequest(http.MethodPost, c.Url+"/oauth/authorize", authRequest.ToJson(), ""); err != nil { + return "", BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return MapFromJson(r.Body)["redirect"], BuildResponse(r) + } +} + +// DeauthorizeOAuthApp will deauthorize an OAuth 2.0 client application from accessing a user's account. 
+func (c *Client4) DeauthorizeOAuthApp(appId string) (bool, *Response) { + requestData := map[string]string{"client_id": appId} + if r, err := c.DoApiRequest(http.MethodPost, c.Url+"/oauth/deauthorize", MapToJson(requestData), ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// Elasticsearch Section + +// TestElasticsearch will attempt to connect to the configured Elasticsearch server and return OK if configured +// correctly. +func (c *Client4) TestElasticsearch() (bool, *Response) { + if r, err := c.DoApiPost(c.GetElasticsearchRoute()+"/test", ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// PurgeElasticsearchIndexes immediately deletes all Elasticsearch indexes. +func (c *Client4) PurgeElasticsearchIndexes() (bool, *Response) { + if r, err := c.DoApiPost(c.GetElasticsearchRoute()+"/purge_indexes", ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// Data Retention Section + +// GetDataRetentionPolicy will get the current server data retention policy details. +func (c *Client4) GetDataRetentionPolicy() (*DataRetentionPolicy, *Response) { + if r, err := c.DoApiGet(c.GetDataRetentionRoute()+"/policy", ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return DataRetentionPolicyFromJson(r.Body), BuildResponse(r) + } +} + +// Commands Section + +// CreateCommand will create a new command if the user have the right permissions. +func (c *Client4) CreateCommand(cmd *Command) (*Command, *Response) { + if r, err := c.DoApiPost(c.GetCommandsRoute(), cmd.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CommandFromJson(r.Body), BuildResponse(r) + } +} + +// UpdateCommand updates a command based on the provided Command struct +func (c *Client4) UpdateCommand(cmd *Command) (*Command, *Response) { + if r, err := c.DoApiPut(c.GetCommandRoute(cmd.Id), cmd.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CommandFromJson(r.Body), BuildResponse(r) + } +} + +// DeleteCommand deletes a command based on the provided command id string +func (c *Client4) DeleteCommand(commandId string) (bool, *Response) { + if r, err := c.DoApiDelete(c.GetCommandRoute(commandId)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// ListCommands will retrieve a list of commands available in the team. +func (c *Client4) ListCommands(teamId string, customOnly bool) ([]*Command, *Response) { + query := fmt.Sprintf("?team_id=%v&custom_only=%v", teamId, customOnly) + if r, err := c.DoApiGet(c.GetCommandsRoute()+query, ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CommandListFromJson(r.Body), BuildResponse(r) + } +} + +// ExecuteCommand executes a given slash command. 
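+// A usage sketch (illustrative only; assumes an authenticated Client4 named
+// client, and the channel id and command text are placeholders):
+//
+//    cmdResp, resp := client.ExecuteCommand("channel-id", "/echo hello")
+//    if resp.Error == nil {
+//        fmt.Println(cmdResp.Text)
+//    }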
+func (c *Client4) ExecuteCommand(channelId, command string) (*CommandResponse, *Response) {
+	commandArgs := &CommandArgs{
+		ChannelId: channelId,
+		Command:   command,
+	}
+	if r, err := c.DoApiPost(c.GetCommandsRoute()+"/execute", commandArgs.ToJson()); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return CommandResponseFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// ExecuteCommandWithTeam executes a given slash command against the specified team.
+// Use this when executing slash commands in a DM/GM, since the team id cannot be inferred in that case.
+func (c *Client4) ExecuteCommandWithTeam(channelId, teamId, command string) (*CommandResponse, *Response) {
+	commandArgs := &CommandArgs{
+		ChannelId: channelId,
+		TeamId:    teamId,
+		Command:   command,
+	}
+	if r, err := c.DoApiPost(c.GetCommandsRoute()+"/execute", commandArgs.ToJson()); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return CommandResponseFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// ListAutocompleteCommands will retrieve the list of commands available for autocomplete in the team.
+func (c *Client4) ListAutocompleteCommands(teamId string) ([]*Command, *Response) {
+	if r, err := c.DoApiGet(c.GetTeamAutoCompleteCommandsRoute(teamId), ""); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return CommandListFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// RegenCommandToken will create a new token if the user has the right permissions.
+func (c *Client4) RegenCommandToken(commandId string) (string, *Response) {
+	if r, err := c.DoApiPut(c.GetCommandRoute(commandId)+"/regen_token", ""); err != nil {
+		return "", BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return MapFromJson(r.Body)["token"], BuildResponse(r)
+	}
+}
+
+// Status Section
+
+// GetUserStatus returns a user status based on the provided user id string.
+func (c *Client4) GetUserStatus(userId, etag string) (*Status, *Response) {
+	if r, err := c.DoApiGet(c.GetUserStatusRoute(userId), etag); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return StatusFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// GetUsersStatusesByIds returns a list of user statuses based on the provided user ids.
+func (c *Client4) GetUsersStatusesByIds(userIds []string) ([]*Status, *Response) {
+	if r, err := c.DoApiPost(c.GetUserStatusesRoute()+"/ids", ArrayToJson(userIds)); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return StatusListFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// UpdateUserStatus sets a user's status based on the provided user id string.
+func (c *Client4) UpdateUserStatus(userId string, userStatus *Status) (*Status, *Response) {
+	if r, err := c.DoApiPut(c.GetUserStatusRoute(userId), userStatus.ToJson()); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return StatusFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// Webrtc Section
+
+// GetWebrtcToken returns a valid token, stun server and turn server with credentials to
+// use with the Mattermost WebRTC service.
+func (c *Client4) GetWebrtcToken() (*WebrtcInfoResponse, *Response) {
+	if r, err := c.DoApiGet("/webrtc/token", ""); err != nil {
+		return nil, BuildErrorResponse(r, err)
+	} else {
+		defer closeBody(r)
+		return WebrtcInfoResponseFromJson(r.Body), BuildResponse(r)
+	}
+}
+
+// Emoji Section
+
+// CreateEmoji will save an emoji to the server if the current user has permission
+// to do so.
If successful, the provided emoji will be returned with its Id field +// filled in. Otherwise, an error will be returned. +func (c *Client4) CreateEmoji(emoji *Emoji, image []byte, filename string) (*Emoji, *Response) { + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + if part, err := writer.CreateFormFile("image", filename); err != nil { + return nil, &Response{StatusCode: http.StatusForbidden, Error: NewAppError("CreateEmoji", "model.client.create_emoji.image.app_error", nil, err.Error(), 0)} + } else if _, err = io.Copy(part, bytes.NewBuffer(image)); err != nil { + return nil, &Response{StatusCode: http.StatusForbidden, Error: NewAppError("CreateEmoji", "model.client.create_emoji.image.app_error", nil, err.Error(), 0)} + } + + if err := writer.WriteField("emoji", emoji.ToJson()); err != nil { + return nil, &Response{StatusCode: http.StatusForbidden, Error: NewAppError("CreateEmoji", "model.client.create_emoji.emoji.app_error", nil, err.Error(), 0)} + } + + if err := writer.Close(); err != nil { + return nil, &Response{StatusCode: http.StatusForbidden, Error: NewAppError("CreateEmoji", "model.client.create_emoji.writer.app_error", nil, err.Error(), 0)} + } + + return c.DoEmojiUploadFile(c.GetEmojisRoute(), body.Bytes(), writer.FormDataContentType()) +} + +// GetEmojiList returns a page of custom emoji on the system. +func (c *Client4) GetEmojiList(page, perPage int) ([]*Emoji, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + if r, err := c.DoApiGet(c.GetEmojisRoute()+query, ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return EmojiListFromJson(r.Body), BuildResponse(r) + } +} + +// GetSortedEmojiList returns a page of custom emoji on the system sorted based on the sort +// parameter, blank for no sorting and "name" to sort by emoji names. +func (c *Client4) GetSortedEmojiList(page, perPage int, sort string) ([]*Emoji, *Response) { + query := fmt.Sprintf("?page=%v&per_page=%v&sort=%v", page, perPage, sort) + if r, err := c.DoApiGet(c.GetEmojisRoute()+query, ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return EmojiListFromJson(r.Body), BuildResponse(r) + } +} + +// DeleteEmoji delete an custom emoji on the provided emoji id string. +func (c *Client4) DeleteEmoji(emojiId string) (bool, *Response) { + if r, err := c.DoApiDelete(c.GetEmojiRoute(emojiId)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// GetEmoji returns a custom emoji based on the emojiId string. +func (c *Client4) GetEmoji(emojiId string) (*Emoji, *Response) { + if r, err := c.DoApiGet(c.GetEmojiRoute(emojiId), ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return EmojiFromJson(r.Body), BuildResponse(r) + } +} + +// GetEmojiByName returns a custom emoji based on the name string. +func (c *Client4) GetEmojiByName(name string) (*Emoji, *Response) { + if r, err := c.DoApiGet(c.GetEmojiByNameRoute(name), ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return EmojiFromJson(r.Body), BuildResponse(r) + } +} + +// GetEmojiImage returns the emoji image. 
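+// A usage sketch (illustrative only; the emoji id and output path are
+// placeholders, and the image format depends on what was originally uploaded):
+//
+//    img, resp := client.GetEmojiImage("emoji-id")
+//    if resp.Error == nil {
+//        ioutil.WriteFile("./emoji.gif", img, 0600)
+//    }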
+func (c *Client4) GetEmojiImage(emojiId string) ([]byte, *Response) { + if r, err := c.DoApiGet(c.GetEmojiRoute(emojiId)+"/image", ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + + if data, err := ioutil.ReadAll(r.Body); err != nil { + return nil, BuildErrorResponse(r, NewAppError("GetEmojiImage", "model.client.read_file.app_error", nil, err.Error(), r.StatusCode)) + } else { + return data, BuildResponse(r) + } + } +} + +// SearchEmoji returns a list of emoji matching some search criteria. +func (c *Client4) SearchEmoji(search *EmojiSearch) ([]*Emoji, *Response) { + if r, err := c.DoApiPost(c.GetEmojisRoute()+"/search", search.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return EmojiListFromJson(r.Body), BuildResponse(r) + } +} + +// AutocompleteEmoji returns a list of emoji starting with or matching name. +func (c *Client4) AutocompleteEmoji(name string, etag string) ([]*Emoji, *Response) { + query := fmt.Sprintf("?name=%v", name) + if r, err := c.DoApiGet(c.GetEmojisRoute()+"/autocomplete"+query, ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return EmojiListFromJson(r.Body), BuildResponse(r) + } +} + +// Reaction Section + +// SaveReaction saves an emoji reaction for a post. Returns the saved reaction if successful, otherwise an error will be returned. +func (c *Client4) SaveReaction(reaction *Reaction) (*Reaction, *Response) { + if r, err := c.DoApiPost(c.GetReactionsRoute(), reaction.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ReactionFromJson(r.Body), BuildResponse(r) + } +} + +// GetReactions returns a list of reactions to a post. +func (c *Client4) GetReactions(postId string) ([]*Reaction, *Response) { + if r, err := c.DoApiGet(c.GetPostRoute(postId)+"/reactions", ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ReactionsFromJson(r.Body), BuildResponse(r) + } +} + +// DeleteReaction deletes reaction of a user in a post. +func (c *Client4) DeleteReaction(reaction *Reaction) (bool, *Response) { + if r, err := c.DoApiDelete(c.GetUserRoute(reaction.UserId) + c.GetPostRoute(reaction.PostId) + fmt.Sprintf("/reactions/%v", reaction.EmojiName)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// Open Graph Metadata Section + +// OpenGraph return the open graph metadata for a particular url if the site have the metadata +func (c *Client4) OpenGraph(url string) (map[string]string, *Response) { + requestBody := make(map[string]string) + requestBody["url"] = url + + if r, err := c.DoApiPost(c.GetOpenGraphRoute(), MapToJson(requestBody)); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return MapFromJson(r.Body), BuildResponse(r) + } +} + +// Jobs Section + +// GetJob gets a single job. +func (c *Client4) GetJob(id string) (*Job, *Response) { + if r, err := c.DoApiGet(c.GetJobsRoute()+fmt.Sprintf("/%v", id), ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return JobFromJson(r.Body), BuildResponse(r) + } +} + +// Get all jobs, sorted with the job that was created most recently first. 
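+// A usage sketch (illustrative only; assumes a Client4 named client
+// authenticated as a system administrator):
+//
+//    jobs, resp := client.GetJobs(0, 50)
+//    if resp.Error == nil {
+//        for _, job := range jobs {
+//            fmt.Println(job.Id, job.Type, job.Status)
+//        }
+//    }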
+func (c *Client4) GetJobs(page int, perPage int) ([]*Job, *Response) { + if r, err := c.DoApiGet(c.GetJobsRoute()+fmt.Sprintf("?page=%v&per_page=%v", page, perPage), ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return JobsFromJson(r.Body), BuildResponse(r) + } +} + +// GetJobsByType gets all jobs of a given type, sorted with the job that was created most recently first. +func (c *Client4) GetJobsByType(jobType string, page int, perPage int) ([]*Job, *Response) { + if r, err := c.DoApiGet(c.GetJobsRoute()+fmt.Sprintf("/type/%v?page=%v&per_page=%v", jobType, page, perPage), ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return JobsFromJson(r.Body), BuildResponse(r) + } +} + +// CreateJob creates a job based on the provided job struct. +func (c *Client4) CreateJob(job *Job) (*Job, *Response) { + if r, err := c.DoApiPost(c.GetJobsRoute(), job.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return JobFromJson(r.Body), BuildResponse(r) + } +} + +// CancelJob requests the cancellation of the job with the provided Id. +func (c *Client4) CancelJob(jobId string) (bool, *Response) { + if r, err := c.DoApiPost(c.GetJobsRoute()+fmt.Sprintf("/%v/cancel", jobId), ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// Plugin Section + +// UploadPlugin takes an io.Reader stream pointing to the contents of a .tar.gz plugin. +// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. +func (c *Client4) UploadPlugin(file io.Reader) (*Manifest, *Response) { + body := new(bytes.Buffer) + writer := multipart.NewWriter(body) + + if part, err := writer.CreateFormFile("plugin", "plugin.tar.gz"); err != nil { + return nil, &Response{Error: NewAppError("UploadPlugin", "model.client.writer.app_error", nil, err.Error(), 0)} + } else if _, err = io.Copy(part, file); err != nil { + return nil, &Response{Error: NewAppError("UploadPlugin", "model.client.writer.app_error", nil, err.Error(), 0)} + } + + if err := writer.Close(); err != nil { + return nil, &Response{Error: NewAppError("UploadPlugin", "model.client.writer.app_error", nil, err.Error(), 0)} + } + + rq, _ := http.NewRequest("POST", c.ApiUrl+c.GetPluginsRoute(), body) + rq.Header.Set("Content-Type", writer.FormDataContentType()) + rq.Close = true + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil || rp == nil { + return nil, BuildErrorResponse(rp, NewAppError("UploadPlugin", "model.client.connecting.app_error", nil, err.Error(), 0)) + } else { + defer closeBody(rp) + + if rp.StatusCode >= 300 { + return nil, BuildErrorResponse(rp, AppErrorFromJson(rp.Body)) + } else { + return ManifestFromJson(rp.Body), BuildResponse(rp) + } + } +} + +// GetPlugins will return a list of plugin manifests for currently active plugins. +// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. +func (c *Client4) GetPlugins() (*PluginsResponse, *Response) { + if r, err := c.DoApiGet(c.GetPluginsRoute(), ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return PluginsResponseFromJson(r.Body), BuildResponse(r) + } +} + +// RemovePlugin will deactivate and delete a plugin. +// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. 
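+// A usage sketch (illustrative only; the plugin id is a placeholder and, as
+// noted above, the plugin API is experimental and subject to change):
+//
+//    if ok, resp := client.RemovePlugin("com.example.demo-plugin"); resp.Error != nil {
+//        fmt.Println("remove failed:", resp.Error.Error())
+//    } else {
+//        fmt.Println("removed:", ok)
+//    }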
+func (c *Client4) RemovePlugin(id string) (bool, *Response) { + if r, err := c.DoApiDelete(c.GetPluginRoute(id)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// GetWebappPlugins will return a list of plugins that the webapp should download. +// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. +func (c *Client4) GetWebappPlugins() ([]*Manifest, *Response) { + if r, err := c.DoApiGet(c.GetPluginsRoute()+"/webapp", ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ManifestListFromJson(r.Body), BuildResponse(r) + } +} + +// ActivatePlugin will activate an plugin installed. +// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. +func (c *Client4) ActivatePlugin(id string) (bool, *Response) { + if r, err := c.DoApiPost(c.GetPluginRoute(id)+"/activate", ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// DeactivatePlugin will deactivate an active plugin. +// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. +func (c *Client4) DeactivatePlugin(id string) (bool, *Response) { + if r, err := c.DoApiPost(c.GetPluginRoute(id)+"/deactivate", ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/cluster_discovery.go b/vendor/github.com/mattermost/mattermost-server/model/cluster_discovery.go new file mode 100644 index 00000000..89e5fc95 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/cluster_discovery.go @@ -0,0 +1,133 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package model + +import ( + "encoding/json" + "io" + "net/http" + "os" +) + +const ( + CDS_OFFLINE_AFTER_MILLIS = 1000 * 60 * 30 // 30 minutes + CDS_TYPE_APP = "mattermost_app" +) + +type ClusterDiscovery struct { + Id string `json:"id"` + Type string `json:"type"` + ClusterName string `json:"cluster_name"` + Hostname string `json:"hostname"` + GossipPort int32 `json:"gossip_port"` + Port int32 `json:"port"` + CreateAt int64 `json:"create_at"` + LastPingAt int64 `json:"last_ping_at"` +} + +func (o *ClusterDiscovery) PreSave() { + if o.Id == "" { + o.Id = NewId() + } + + if o.CreateAt == 0 { + o.CreateAt = GetMillis() + o.LastPingAt = o.CreateAt + } +} + +func (o *ClusterDiscovery) AutoFillHostname() { + // attempt to set the hostname from the OS + if len(o.Hostname) == 0 { + if hn, err := os.Hostname(); err == nil { + o.Hostname = hn + } + } +} + +func (o *ClusterDiscovery) AutoFillIpAddress() { + // attempt to set the hostname to the first non-local IP address + if len(o.Hostname) == 0 { + o.Hostname = GetServerIpAddress() + } +} + +func (o *ClusterDiscovery) IsEqual(in *ClusterDiscovery) bool { + if in == nil { + return false + } + + if o.Type != in.Type { + return false + } + + if o.ClusterName != in.ClusterName { + return false + } + + if o.Hostname != in.Hostname { + return false + } + + return true +} + +func FilterClusterDiscovery(vs []*ClusterDiscovery, f func(*ClusterDiscovery) bool) []*ClusterDiscovery { + copy := make([]*ClusterDiscovery, 0) + for _, v := range vs { + if f(v) { + copy = append(copy, v) + } + } + + return copy +} + +func (o *ClusterDiscovery) IsValid() *AppError { + if len(o.Id) != 26 { + return NewAppError("Channel.IsValid", "model.channel.is_valid.id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.ClusterName) == 0 { + return NewAppError("ClusterDiscovery.IsValid", "ClusterName must be set", nil, "", http.StatusBadRequest) + } + + if len(o.Type) == 0 { + return NewAppError("ClusterDiscovery.IsValid", "Type must be set", nil, "", http.StatusBadRequest) + } + + if len(o.Hostname) == 0 { + return NewAppError("ClusterDiscovery.IsValid", "Hostname must be set", nil, "", http.StatusBadRequest) + } + + if o.CreateAt == 0 { + return NewAppError("ClusterDiscovery.IsValid", "CreateAt must be set", nil, "", http.StatusBadRequest) + } + + if o.LastPingAt == 0 { + return NewAppError("ClusterDiscovery.IsValid", "LastPingAt must be set", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (o *ClusterDiscovery) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } + + return string(b) +} + +func ClusterDiscoveryFromJson(data io.Reader) *ClusterDiscovery { + decoder := json.NewDecoder(data) + var me ClusterDiscovery + err := decoder.Decode(&me) + if err == nil { + return &me + } + + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/cluster_info.go b/vendor/github.com/mattermost/mattermost-server/model/cluster_info.go new file mode 100644 index 00000000..a8d63ec3 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/cluster_info.go @@ -0,0 +1,50 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package model + +import ( + "encoding/json" + "io" + "strings" +) + +type ClusterInfo struct { + Id string `json:"id"` + Version string `json:"version"` + ConfigHash string `json:"config_hash"` + IpAddress string `json:"ipaddress"` + Hostname string `json:"hostname"` +} + +func (me *ClusterInfo) ToJson() string { + b, _ := json.Marshal(me) + return string(b) +} + +func (me *ClusterInfo) Copy() *ClusterInfo { + json := me.ToJson() + return ClusterInfoFromJson(strings.NewReader(json)) +} + +func ClusterInfoFromJson(data io.Reader) *ClusterInfo { + var me *ClusterInfo + json.NewDecoder(data).Decode(&me) + return me +} + +func ClusterInfosToJson(objmap []*ClusterInfo) string { + b, _ := json.Marshal(objmap) + return string(b) +} + +func ClusterInfosFromJson(data io.Reader) []*ClusterInfo { + decoder := json.NewDecoder(data) + + var objmap []*ClusterInfo + if err := decoder.Decode(&objmap); err != nil { + return make([]*ClusterInfo, 0) + } else { + return objmap + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/cluster_message.go b/vendor/github.com/mattermost/mattermost-server/model/cluster_message.go new file mode 100644 index 00000000..f060c4ac --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/cluster_message.go @@ -0,0 +1,46 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +const ( + CLUSTER_EVENT_PUBLISH = "publish" + CLUSTER_EVENT_UPDATE_STATUS = "update_status" + CLUSTER_EVENT_INVALIDATE_ALL_CACHES = "inv_all_caches" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_REACTIONS = "inv_reactions" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_WEBHOOK = "inv_webhook" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_POSTS = "inv_channel_posts" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_MEMBERS_NOTIFY_PROPS = "inv_channel_members_notify_props" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_MEMBERS = "inv_channel_members" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_BY_NAME = "inv_channel_name" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL = "inv_channel" + CLUSTER_EVENT_INVALIDATE_CACHE_FOR_USER = "inv_user" + CLUSTER_EVENT_CLEAR_SESSION_CACHE_FOR_USER = "clear_session_user" + + CLUSTER_SEND_BEST_EFFORT = "best_effort" + CLUSTER_SEND_RELIABLE = "reliable" +) + +type ClusterMessage struct { + Event string `json:"event"` + SendType string `json:"-"` + WaitForAllToSend bool `json:"-"` + Data string `json:"data,omitempty"` + Props map[string]string `json:"props,omitempty"` +} + +func (o *ClusterMessage) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func ClusterMessageFromJson(data io.Reader) *ClusterMessage { + var o *ClusterMessage + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/cluster_stats.go b/vendor/github.com/mattermost/mattermost-server/model/cluster_stats.go new file mode 100644 index 00000000..064f7b81 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/cluster_stats.go @@ -0,0 +1,27 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
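ClusterMessage serializes through the same ToJson/FromJson pair as the other cluster types, but SendType and WaitForAllToSend carry the json:"-" tag and therefore do not survive a round trip. A short sketch:

package main

import (
	"fmt"
	"strings"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	msg := &model.ClusterMessage{
		Event:    model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL,
		SendType: model.CLUSTER_SEND_BEST_EFFORT,
		Data:     "some-channel-id", // illustrative payload
	}

	decoded := model.ClusterMessageFromJson(strings.NewReader(msg.ToJson()))
	fmt.Println(decoded.Event)    // "inv_channel"
	fmt.Println(decoded.SendType) // "" – dropped by the json:"-" tag
}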
+ +package model + +import ( + "encoding/json" + "io" +) + +type ClusterStats struct { + Id string `json:"id"` + TotalWebsocketConnections int `json:"total_websocket_connections"` + TotalReadDbConnections int `json:"total_read_db_connections"` + TotalMasterDbConnections int `json:"total_master_db_connections"` +} + +func (me *ClusterStats) ToJson() string { + b, _ := json.Marshal(me) + return string(b) +} + +func ClusterStatsFromJson(data io.Reader) *ClusterStats { + var me *ClusterStats + json.NewDecoder(data).Decode(&me) + return me +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/command.go b/vendor/github.com/mattermost/mattermost-server/model/command.go new file mode 100644 index 00000000..b23e5020 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/command.go @@ -0,0 +1,139 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" + "net/http" + "strings" +) + +const ( + COMMAND_METHOD_POST = "P" + COMMAND_METHOD_GET = "G" + MIN_TRIGGER_LENGTH = 1 + MAX_TRIGGER_LENGTH = 128 +) + +type Command struct { + Id string `json:"id"` + Token string `json:"token"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + CreatorId string `json:"creator_id"` + TeamId string `json:"team_id"` + Trigger string `json:"trigger"` + Method string `json:"method"` + Username string `json:"username"` + IconURL string `json:"icon_url"` + AutoComplete bool `json:"auto_complete"` + AutoCompleteDesc string `json:"auto_complete_desc"` + AutoCompleteHint string `json:"auto_complete_hint"` + DisplayName string `json:"display_name"` + Description string `json:"description"` + URL string `json:"url"` +} + +func (o *Command) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func CommandFromJson(data io.Reader) *Command { + var o *Command + json.NewDecoder(data).Decode(&o) + return o +} + +func CommandListToJson(l []*Command) string { + b, _ := json.Marshal(l) + return string(b) +} + +func CommandListFromJson(data io.Reader) []*Command { + var o []*Command + json.NewDecoder(data).Decode(&o) + return o +} + +func (o *Command) IsValid() *AppError { + + if len(o.Id) != 26 { + return NewAppError("Command.IsValid", "model.command.is_valid.id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.Token) != 26 { + return NewAppError("Command.IsValid", "model.command.is_valid.token.app_error", nil, "", http.StatusBadRequest) + } + + if o.CreateAt == 0 { + return NewAppError("Command.IsValid", "model.command.is_valid.create_at.app_error", nil, "", http.StatusBadRequest) + } + + if o.UpdateAt == 0 { + return NewAppError("Command.IsValid", "model.command.is_valid.update_at.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.CreatorId) != 26 { + return NewAppError("Command.IsValid", "model.command.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.TeamId) != 26 { + return NewAppError("Command.IsValid", "model.command.is_valid.team_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.Trigger) < MIN_TRIGGER_LENGTH || len(o.Trigger) > MAX_TRIGGER_LENGTH || strings.Index(o.Trigger, "/") == 0 || strings.Contains(o.Trigger, " ") { + return NewAppError("Command.IsValid", "model.command.is_valid.trigger.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.URL) == 0 || len(o.URL) > 1024 { + return NewAppError("Command.IsValid", 
"model.command.is_valid.url.app_error", nil, "", http.StatusBadRequest) + } + + if !IsValidHttpUrl(o.URL) { + return NewAppError("Command.IsValid", "model.command.is_valid.url_http.app_error", nil, "", http.StatusBadRequest) + } + + if !(o.Method == COMMAND_METHOD_GET || o.Method == COMMAND_METHOD_POST) { + return NewAppError("Command.IsValid", "model.command.is_valid.method.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.DisplayName) > 64 { + return NewAppError("Command.IsValid", "model.command.is_valid.display_name.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.Description) > 128 { + return NewAppError("Command.IsValid", "model.command.is_valid.description.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (o *Command) PreSave() { + if o.Id == "" { + o.Id = NewId() + } + + if o.Token == "" { + o.Token = NewId() + } + + o.CreateAt = GetMillis() + o.UpdateAt = o.CreateAt +} + +func (o *Command) PreUpdate() { + o.UpdateAt = GetMillis() +} + +func (o *Command) Sanitize() { + o.Token = "" + o.CreatorId = "" + o.Method = "" + o.URL = "" + o.Username = "" + o.IconURL = "" +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/command_args.go b/vendor/github.com/mattermost/mattermost-server/model/command_args.go new file mode 100644 index 00000000..4a635a1a --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/command_args.go @@ -0,0 +1,34 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" + + goi18n "github.com/nicksnyder/go-i18n/i18n" +) + +type CommandArgs struct { + UserId string `json:"user_id"` + ChannelId string `json:"channel_id"` + TeamId string `json:"team_id"` + RootId string `json:"root_id"` + ParentId string `json:"parent_id"` + Command string `json:"command"` + SiteURL string `json:"-"` + T goi18n.TranslateFunc `json:"-"` + Session Session `json:"-"` +} + +func (o *CommandArgs) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func CommandArgsFromJson(data io.Reader) *CommandArgs { + var o *CommandArgs + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/command_response.go b/vendor/github.com/mattermost/mattermost-server/model/command_response.go new file mode 100644 index 00000000..cac7e845 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/command_response.go @@ -0,0 +1,61 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package model + +import ( + "encoding/json" + "io" + "io/ioutil" + "strings" +) + +const ( + COMMAND_RESPONSE_TYPE_IN_CHANNEL = "in_channel" + COMMAND_RESPONSE_TYPE_EPHEMERAL = "ephemeral" +) + +type CommandResponse struct { + ResponseType string `json:"response_type"` + Text string `json:"text"` + Username string `json:"username"` + IconURL string `json:"icon_url"` + Type string `json:"type"` + Props StringInterface `json:"props"` + GotoLocation string `json:"goto_location"` + Attachments []*SlackAttachment `json:"attachments"` +} + +func (o *CommandResponse) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func CommandResponseFromHTTPBody(contentType string, body io.Reader) *CommandResponse { + if strings.TrimSpace(strings.Split(contentType, ";")[0]) == "application/json" { + return CommandResponseFromJson(body) + } + if b, err := ioutil.ReadAll(body); err == nil { + return CommandResponseFromPlainText(string(b)) + } + return nil +} + +func CommandResponseFromPlainText(text string) *CommandResponse { + return &CommandResponse{ + Text: text, + } +} + +func CommandResponseFromJson(data io.Reader) *CommandResponse { + decoder := json.NewDecoder(data) + var o CommandResponse + + if err := decoder.Decode(&o); err != nil { + return nil + } + + o.Attachments = StringifySlackFieldValue(o.Attachments) + + return &o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/command_webhook.go b/vendor/github.com/mattermost/mattermost-server/model/command_webhook.go new file mode 100644 index 00000000..0b00e00b --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/command_webhook.go @@ -0,0 +1,65 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
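Putting the two command types together: a slash command passes IsValid only after PreSave has filled in its Id, Token and timestamps, and the response its integration returns is parsed by CommandResponseFromHTTPBody according to the Content-Type. A sketch with illustrative values:

package main

import (
	"fmt"
	"strings"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	cmd := &model.Command{
		CreatorId: model.NewId(),
		TeamId:    model.NewId(),
		Trigger:   "deploy", // 1-128 chars, no leading "/", no spaces
		Method:    model.COMMAND_METHOD_POST,
		URL:       "https://example.com/hooks/deploy", // must be a valid http(s) URL, at most 1024 chars
	}

	cmd.PreSave() // assigns Id and Token, stamps CreateAt/UpdateAt
	if err := cmd.IsValid(); err != nil {
		fmt.Println("invalid command:", err.Error())
		return
	}
	cmd.Sanitize() // strips Token, CreatorId, Method, URL, Username and IconURL before exposing the command

	// JSON bodies go through CommandResponseFromJson; anything else is wrapped as plain text.
	jsonBody := `{"response_type": "in_channel", "text": "All systems go"}`
	r1 := model.CommandResponseFromHTTPBody("application/json; charset=utf-8", strings.NewReader(jsonBody))
	fmt.Println(r1.ResponseType, r1.Text) // "in_channel All systems go"

	r2 := model.CommandResponseFromHTTPBody("text/plain", strings.NewReader("All systems go"))
	fmt.Println(r2.Text) // plain text leaves ResponseType empty
}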
+ +package model + +import ( + "net/http" +) + +type CommandWebhook struct { + Id string + CreateAt int64 + CommandId string + UserId string + ChannelId string + RootId string + ParentId string + UseCount int +} + +const ( + COMMAND_WEBHOOK_LIFETIME = 1000 * 60 * 30 +) + +func (o *CommandWebhook) PreSave() { + if o.Id == "" { + o.Id = NewId() + } + + if o.CreateAt == 0 { + o.CreateAt = GetMillis() + } +} + +func (o *CommandWebhook) IsValid() *AppError { + if len(o.Id) != 26 { + return NewAppError("CommandWebhook.IsValid", "model.command_hook.id.app_error", nil, "", http.StatusBadRequest) + } + + if o.CreateAt == 0 { + return NewAppError("CommandWebhook.IsValid", "model.command_hook.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if len(o.CommandId) != 26 { + return NewAppError("CommandWebhook.IsValid", "model.command_hook.command_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.UserId) != 26 { + return NewAppError("CommandWebhook.IsValid", "model.command_hook.user_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.ChannelId) != 26 { + return NewAppError("CommandWebhook.IsValid", "model.command_hook.channel_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.RootId) != 0 && len(o.RootId) != 26 { + return NewAppError("CommandWebhook.IsValid", "model.command_hook.root_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.ParentId) != 0 && len(o.ParentId) != 26 { + return NewAppError("CommandWebhook.IsValid", "model.command_hook.parent_id.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/compliance.go b/vendor/github.com/mattermost/mattermost-server/model/compliance.go new file mode 100644 index 00000000..5546b783 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/compliance.go @@ -0,0 +1,119 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
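CommandWebhook follows the same PreSave/IsValid convention: every id must be a 26-character id, while RootId and ParentId may stay empty. A minimal sketch:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	hook := &model.CommandWebhook{
		CommandId: model.NewId(),
		UserId:    model.NewId(),
		ChannelId: model.NewId(),
		// RootId and ParentId may stay empty; if set they must also be 26-character ids.
	}

	hook.PreSave() // assigns Id and CreateAt
	if err := hook.IsValid(); err != nil {
		fmt.Println("invalid command webhook:", err.Error())
	}

	// COMMAND_WEBHOOK_LIFETIME (30 minutes, in milliseconds) bounds how long such a hook is honoured.
	fmt.Println(model.COMMAND_WEBHOOK_LIFETIME)
}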
+ +package model + +import ( + "encoding/json" + "io" + "net/http" + "strings" +) + +const ( + COMPLIANCE_STATUS_CREATED = "created" + COMPLIANCE_STATUS_RUNNING = "running" + COMPLIANCE_STATUS_FINISHED = "finished" + COMPLIANCE_STATUS_FAILED = "failed" + COMPLIANCE_STATUS_REMOVED = "removed" + + COMPLIANCE_TYPE_DAILY = "daily" + COMPLIANCE_TYPE_ADHOC = "adhoc" +) + +type Compliance struct { + Id string `json:"id"` + CreateAt int64 `json:"create_at"` + UserId string `json:"user_id"` + Status string `json:"status"` + Count int `json:"count"` + Desc string `json:"desc"` + Type string `json:"type"` + StartAt int64 `json:"start_at"` + EndAt int64 `json:"end_at"` + Keywords string `json:"keywords"` + Emails string `json:"emails"` +} + +type Compliances []Compliance + +func (o *Compliance) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func (me *Compliance) PreSave() { + if me.Id == "" { + me.Id = NewId() + } + + if me.Status == "" { + me.Status = COMPLIANCE_STATUS_CREATED + } + + me.Count = 0 + me.Emails = NormalizeEmail(me.Emails) + me.Keywords = strings.ToLower(me.Keywords) + + me.CreateAt = GetMillis() +} + +func (me *Compliance) JobName() string { + jobName := me.Type + if me.Type == COMPLIANCE_TYPE_DAILY { + jobName += "-" + me.Desc + } + + jobName += "-" + me.Id + + return jobName +} + +func (me *Compliance) IsValid() *AppError { + + if len(me.Id) != 26 { + return NewAppError("Compliance.IsValid", "model.compliance.is_valid.id.app_error", nil, "", http.StatusBadRequest) + } + + if me.CreateAt == 0 { + return NewAppError("Compliance.IsValid", "model.compliance.is_valid.create_at.app_error", nil, "", http.StatusBadRequest) + } + + if len(me.Desc) > 512 || len(me.Desc) == 0 { + return NewAppError("Compliance.IsValid", "model.compliance.is_valid.desc.app_error", nil, "", http.StatusBadRequest) + } + + if me.StartAt == 0 { + return NewAppError("Compliance.IsValid", "model.compliance.is_valid.start_at.app_error", nil, "", http.StatusBadRequest) + } + + if me.EndAt == 0 { + return NewAppError("Compliance.IsValid", "model.compliance.is_valid.end_at.app_error", nil, "", http.StatusBadRequest) + } + + if me.EndAt <= me.StartAt { + return NewAppError("Compliance.IsValid", "model.compliance.is_valid.start_end_at.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func ComplianceFromJson(data io.Reader) *Compliance { + var o *Compliance + json.NewDecoder(data).Decode(&o) + return o +} + +func (o Compliances) ToJson() string { + if b, err := json.Marshal(o); err != nil { + return "[]" + } else { + return string(b) + } +} + +func CompliancesFromJson(data io.Reader) Compliances { + var o Compliances + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/compliance_post.go b/vendor/github.com/mattermost/mattermost-server/model/compliance_post.go new file mode 100644 index 00000000..3751c586 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/compliance_post.go @@ -0,0 +1,114 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
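A compliance export request needs a non-empty Desc and a StartAt/EndAt window, and PreSave normalizes the email and keyword filters. A sketch with illustrative values:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	job := &model.Compliance{
		UserId:   model.NewId(),
		Type:     model.COMPLIANCE_TYPE_ADHOC,
		Desc:     "q3-legal-hold", // required, 1-512 characters
		StartAt:  model.GetMillis() - 1000*60*60*24,
		EndAt:    model.GetMillis(),
		Emails:   "Alice@Example.com, bob@example.com", // normalized (lower-cased) by PreSave
		Keywords: "Incident, Outage",                   // lower-cased by PreSave
	}

	job.PreSave() // assigns Id, sets Status to "created", zeroes Count
	if err := job.IsValid(); err != nil {
		fmt.Println("invalid compliance job:", err.Error())
		return
	}
	fmt.Println(job.JobName()) // "adhoc-<id>"; daily jobs also embed Desc
}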
+ +package model + +import ( + "regexp" + "time" +) + +type CompliancePost struct { + + // From Team + TeamName string + TeamDisplayName string + + // From Channel + ChannelName string + ChannelDisplayName string + + // From User + UserUsername string + UserEmail string + UserNickname string + + // From Post + PostId string + PostCreateAt int64 + PostUpdateAt int64 + PostDeleteAt int64 + PostRootId string + PostParentId string + PostOriginalId string + PostMessage string + PostType string + PostProps string + PostHashtags string + PostFileIds string +} + +func CompliancePostHeader() []string { + return []string{ + "TeamName", + "TeamDisplayName", + + "ChannelName", + "ChannelDisplayName", + + "UserUsername", + "UserEmail", + "UserNickname", + + "PostId", + "PostCreateAt", + "PostUpdateAt", + "PostDeleteAt", + "PostRootId", + "PostParentId", + "PostOriginalId", + "PostMessage", + "PostType", + "PostProps", + "PostHashtags", + "PostFileIds", + } +} + +func cleanComplianceStrings(in string) string { + if matched, _ := regexp.MatchString("^\\s*(=|\\+|\\-)", in); matched { + return "'" + in + + } else { + return in + } +} + +func (me *CompliancePost) Row() []string { + + postDeleteAt := "" + if me.PostDeleteAt > 0 { + postDeleteAt = time.Unix(0, me.PostDeleteAt*int64(1000*1000)).Format(time.RFC3339) + } + + postUpdateAt := "" + if me.PostUpdateAt != me.PostCreateAt { + postUpdateAt = time.Unix(0, me.PostUpdateAt*int64(1000*1000)).Format(time.RFC3339) + } + + return []string{ + cleanComplianceStrings(me.TeamName), + cleanComplianceStrings(me.TeamDisplayName), + + cleanComplianceStrings(me.ChannelName), + cleanComplianceStrings(me.ChannelDisplayName), + + cleanComplianceStrings(me.UserUsername), + cleanComplianceStrings(me.UserEmail), + cleanComplianceStrings(me.UserNickname), + + me.PostId, + time.Unix(0, me.PostCreateAt*int64(1000*1000)).Format(time.RFC3339), + postUpdateAt, + postDeleteAt, + + me.PostRootId, + me.PostParentId, + me.PostOriginalId, + cleanComplianceStrings(me.PostMessage), + me.PostType, + me.PostProps, + me.PostHashtags, + me.PostFileIds, + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/config.go b/vendor/github.com/mattermost/mattermost-server/model/config.go new file mode 100644 index 00000000..9010eaea --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/config.go @@ -0,0 +1,2238 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
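When a CompliancePost is flattened into a CSV row, cleanComplianceStrings prefixes values starting with =, + or - with a single quote so they cannot be interpreted as spreadsheet formulas, and timestamps are rendered as RFC3339. A short sketch with an illustrative formula-style payload:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	post := &model.CompliancePost{
		TeamName:     "engineering",
		ChannelName:  "town-square",
		UserEmail:    "alice@example.com",
		PostId:       model.NewId(),
		PostCreateAt: model.GetMillis(),
		PostMessage:  `=HYPERLINK("http://evil.example")`,
	}

	row := post.Row()
	fmt.Println(row[7])  // PostId
	fmt.Println(row[14]) // the message, escaped to '=HYPERLINK(...) by cleanComplianceStrings
}

PostUpdateAt is emitted as an empty string when it equals PostCreateAt, so unedited posts show only their creation time in the export.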
+ +package model + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + "strings" + "time" +) + +const ( + CONN_SECURITY_NONE = "" + CONN_SECURITY_PLAIN = "PLAIN" + CONN_SECURITY_TLS = "TLS" + CONN_SECURITY_STARTTLS = "STARTTLS" + + IMAGE_DRIVER_LOCAL = "local" + IMAGE_DRIVER_S3 = "amazons3" + + DATABASE_DRIVER_MYSQL = "mysql" + DATABASE_DRIVER_POSTGRES = "postgres" + + MINIO_ACCESS_KEY = "minioaccesskey" + MINIO_SECRET_KEY = "miniosecretkey" + MINIO_BUCKET = "mattermost-test" + + PASSWORD_MAXIMUM_LENGTH = 64 + PASSWORD_MINIMUM_LENGTH = 5 + + SERVICE_GITLAB = "gitlab" + SERVICE_GOOGLE = "google" + SERVICE_OFFICE365 = "office365" + + WEBSERVER_MODE_REGULAR = "regular" + WEBSERVER_MODE_GZIP = "gzip" + WEBSERVER_MODE_DISABLED = "disabled" + + GENERIC_NO_CHANNEL_NOTIFICATION = "generic_no_channel" + GENERIC_NOTIFICATION = "generic" + FULL_NOTIFICATION = "full" + + DIRECT_MESSAGE_ANY = "any" + DIRECT_MESSAGE_TEAM = "team" + + SHOW_USERNAME = "username" + SHOW_NICKNAME_FULLNAME = "nickname_full_name" + SHOW_FULLNAME = "full_name" + + PERMISSIONS_ALL = "all" + PERMISSIONS_CHANNEL_ADMIN = "channel_admin" + PERMISSIONS_TEAM_ADMIN = "team_admin" + PERMISSIONS_SYSTEM_ADMIN = "system_admin" + + FAKE_SETTING = "********************************" + + RESTRICT_EMOJI_CREATION_ALL = "all" + RESTRICT_EMOJI_CREATION_ADMIN = "admin" + RESTRICT_EMOJI_CREATION_SYSTEM_ADMIN = "system_admin" + + PERMISSIONS_DELETE_POST_ALL = "all" + PERMISSIONS_DELETE_POST_TEAM_ADMIN = "team_admin" + PERMISSIONS_DELETE_POST_SYSTEM_ADMIN = "system_admin" + + ALLOW_EDIT_POST_ALWAYS = "always" + ALLOW_EDIT_POST_NEVER = "never" + ALLOW_EDIT_POST_TIME_LIMIT = "time_limit" + + GROUP_UNREAD_CHANNELS_DISABLED = "disabled" + GROUP_UNREAD_CHANNELS_DEFAULT_ON = "default_on" + GROUP_UNREAD_CHANNELS_DEFAULT_OFF = "default_off" + + EMAIL_BATCHING_BUFFER_SIZE = 256 + EMAIL_BATCHING_INTERVAL = 30 + + EMAIL_NOTIFICATION_CONTENTS_FULL = "full" + EMAIL_NOTIFICATION_CONTENTS_GENERIC = "generic" + + SITENAME_MAX_LENGTH = 30 + + SERVICE_SETTINGS_DEFAULT_SITE_URL = "" + SERVICE_SETTINGS_DEFAULT_TLS_CERT_FILE = "" + SERVICE_SETTINGS_DEFAULT_TLS_KEY_FILE = "" + SERVICE_SETTINGS_DEFAULT_READ_TIMEOUT = 300 + SERVICE_SETTINGS_DEFAULT_WRITE_TIMEOUT = 300 + SERVICE_SETTINGS_DEFAULT_MAX_LOGIN_ATTEMPTS = 10 + SERVICE_SETTINGS_DEFAULT_ALLOW_CORS_FROM = "" + SERVICE_SETTINGS_DEFAULT_LISTEN_AND_ADDRESS = ":8065" + + TEAM_SETTINGS_DEFAULT_MAX_USERS_PER_TEAM = 50 + TEAM_SETTINGS_DEFAULT_CUSTOM_BRAND_TEXT = "" + TEAM_SETTINGS_DEFAULT_CUSTOM_DESCRIPTION_TEXT = "" + TEAM_SETTINGS_DEFAULT_USER_STATUS_AWAY_TIMEOUT = 300 + + SQL_SETTINGS_DEFAULT_DATA_SOURCE = "mmuser:mostest@tcp(dockerhost:3306)/mattermost_test?charset=utf8mb4,utf8&readTimeout=30s&writeTimeout=30s" + + EMAIL_SETTINGS_DEFAULT_FEEDBACK_ORGANIZATION = "" + + SUPPORT_SETTINGS_DEFAULT_TERMS_OF_SERVICE_LINK = "https://about.mattermost.com/default-terms/" + SUPPORT_SETTINGS_DEFAULT_PRIVACY_POLICY_LINK = "https://about.mattermost.com/default-privacy-policy/" + SUPPORT_SETTINGS_DEFAULT_ABOUT_LINK = "https://about.mattermost.com/default-about/" + SUPPORT_SETTINGS_DEFAULT_HELP_LINK = "https://about.mattermost.com/default-help/" + SUPPORT_SETTINGS_DEFAULT_REPORT_A_PROBLEM_LINK = "https://about.mattermost.com/default-report-a-problem/" + SUPPORT_SETTINGS_DEFAULT_ADMINISTRATORS_GUIDE_LINK = "https://about.mattermost.com/administrators-guide/" + SUPPORT_SETTINGS_DEFAULT_TROUBLESHOOTING_FORUM_LINK = "https://about.mattermost.com/troubleshooting-forum/" + SUPPORT_SETTINGS_DEFAULT_COMMERCIAL_SUPPORT_LINK = 
"https://about.mattermost.com/commercial-support/" + SUPPORT_SETTINGS_DEFAULT_SUPPORT_EMAIL = "feedback@mattermost.com" + + LDAP_SETTINGS_DEFAULT_FIRST_NAME_ATTRIBUTE = "" + LDAP_SETTINGS_DEFAULT_LAST_NAME_ATTRIBUTE = "" + LDAP_SETTINGS_DEFAULT_EMAIL_ATTRIBUTE = "" + LDAP_SETTINGS_DEFAULT_USERNAME_ATTRIBUTE = "" + LDAP_SETTINGS_DEFAULT_NICKNAME_ATTRIBUTE = "" + LDAP_SETTINGS_DEFAULT_ID_ATTRIBUTE = "" + LDAP_SETTINGS_DEFAULT_POSITION_ATTRIBUTE = "" + LDAP_SETTINGS_DEFAULT_LOGIN_FIELD_NAME = "" + + SAML_SETTINGS_DEFAULT_FIRST_NAME_ATTRIBUTE = "" + SAML_SETTINGS_DEFAULT_LAST_NAME_ATTRIBUTE = "" + SAML_SETTINGS_DEFAULT_EMAIL_ATTRIBUTE = "" + SAML_SETTINGS_DEFAULT_USERNAME_ATTRIBUTE = "" + SAML_SETTINGS_DEFAULT_NICKNAME_ATTRIBUTE = "" + SAML_SETTINGS_DEFAULT_LOCALE_ATTRIBUTE = "" + SAML_SETTINGS_DEFAULT_POSITION_ATTRIBUTE = "" + + NATIVEAPP_SETTINGS_DEFAULT_APP_DOWNLOAD_LINK = "https://about.mattermost.com/downloads/" + NATIVEAPP_SETTINGS_DEFAULT_ANDROID_APP_DOWNLOAD_LINK = "https://about.mattermost.com/mattermost-android-app/" + NATIVEAPP_SETTINGS_DEFAULT_IOS_APP_DOWNLOAD_LINK = "https://about.mattermost.com/mattermost-ios-app/" + + WEBRTC_SETTINGS_DEFAULT_STUN_URI = "" + WEBRTC_SETTINGS_DEFAULT_TURN_URI = "" + + ANALYTICS_SETTINGS_DEFAULT_MAX_USERS_FOR_STATISTICS = 2500 + + ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_COLOR = "#f2a93b" + ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_TEXT_COLOR = "#333333" + + TEAM_SETTINGS_DEFAULT_TEAM_TEXT = "default" + + ELASTICSEARCH_SETTINGS_DEFAULT_CONNECTION_URL = "" + ELASTICSEARCH_SETTINGS_DEFAULT_USERNAME = "" + ELASTICSEARCH_SETTINGS_DEFAULT_PASSWORD = "" + ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_REPLICAS = 1 + ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_SHARDS = 1 + ELASTICSEARCH_SETTINGS_DEFAULT_AGGREGATE_POSTS_AFTER_DAYS = 365 + ELASTICSEARCH_SETTINGS_DEFAULT_POSTS_AGGREGATOR_JOB_START_TIME = "03:00" + ELASTICSEARCH_SETTINGS_DEFAULT_INDEX_PREFIX = "" + ELASTICSEARCH_SETTINGS_DEFAULT_LIVE_INDEXING_BATCH_SIZE = 1 + ELASTICSEARCH_SETTINGS_DEFAULT_BULK_INDEXING_TIME_WINDOW_SECONDS = 3600 + ELASTICSEARCH_SETTINGS_DEFAULT_REQUEST_TIMEOUT_SECONDS = 30 + + DATA_RETENTION_SETTINGS_DEFAULT_MESSAGE_RETENTION_DAYS = 365 + DATA_RETENTION_SETTINGS_DEFAULT_FILE_RETENTION_DAYS = 365 + DATA_RETENTION_SETTINGS_DEFAULT_DELETION_JOB_START_TIME = "02:00" + + PLUGIN_SETTINGS_DEFAULT_DIRECTORY = "./plugins" + PLUGIN_SETTINGS_DEFAULT_CLIENT_DIRECTORY = "./client/plugins" + + COMPLIANCE_EXPORT_TYPE_ACTIANCE = "actiance" + COMPLIANCE_EXPORT_TYPE_GLOBALRELAY = "globalrelay" +) + +type ServiceSettings struct { + SiteURL *string + LicenseFileLocation *string + ListenAddress *string + ConnectionSecurity *string + TLSCertFile *string + TLSKeyFile *string + UseLetsEncrypt *bool + LetsEncryptCertificateCacheFile *string + Forward80To443 *bool + ReadTimeout *int + WriteTimeout *int + MaximumLoginAttempts *int + GoroutineHealthThreshold *int + GoogleDeveloperKey string + EnableOAuthServiceProvider bool + EnableIncomingWebhooks bool + EnableOutgoingWebhooks bool + EnableCommands *bool + EnableOnlyAdminIntegrations *bool + EnablePostUsernameOverride bool + EnablePostIconOverride bool + EnableAPIv3 *bool + EnableLinkPreviews *bool + EnableTesting bool + EnableDeveloper *bool + EnableSecurityFixAlert *bool + EnableInsecureOutgoingConnections *bool + AllowedUntrustedInternalConnections *string + EnableMultifactorAuthentication *bool + EnforceMultifactorAuthentication *bool + EnableUserAccessTokens *bool + AllowCorsFrom *string + SessionLengthWebInDays *int + SessionLengthMobileInDays *int + 
SessionLengthSSOInDays *int + SessionCacheInMinutes *int + SessionIdleTimeoutInMinutes *int + WebsocketSecurePort *int + WebsocketPort *int + WebserverMode *string + EnableCustomEmoji *bool + EnableEmojiPicker *bool + RestrictCustomEmojiCreation *string + RestrictPostDelete *string + AllowEditPost *string + PostEditTimeLimit *int + TimeBetweenUserTypingUpdatesMilliseconds *int64 + EnablePostSearch *bool + EnableUserTypingMessages *bool + EnableChannelViewedMessages *bool + EnableUserStatuses *bool + ExperimentalEnableAuthenticationTransfer *bool + ClusterLogTimeoutMilliseconds *int + CloseUnusedDirectMessages *bool + EnablePreviewFeatures *bool + EnableTutorial *bool + ExperimentalEnableDefaultChannelLeaveJoinMessages *bool + ExperimentalGroupUnreadChannels *string + ImageProxyType *string + ImageProxyURL *string + ImageProxyOptions *string +} + +func (s *ServiceSettings) SetDefaults() { + if s.SiteURL == nil { + s.SiteURL = NewString(SERVICE_SETTINGS_DEFAULT_SITE_URL) + } + + if s.LicenseFileLocation == nil { + s.LicenseFileLocation = NewString("") + } + + if s.ListenAddress == nil { + s.ListenAddress = NewString(SERVICE_SETTINGS_DEFAULT_LISTEN_AND_ADDRESS) + } + + if s.EnableAPIv3 == nil { + s.EnableAPIv3 = NewBool(true) + } + + if s.EnableLinkPreviews == nil { + s.EnableLinkPreviews = NewBool(false) + } + + if s.EnableDeveloper == nil { + s.EnableDeveloper = NewBool(false) + } + + if s.EnableSecurityFixAlert == nil { + s.EnableSecurityFixAlert = NewBool(true) + } + + if s.EnableInsecureOutgoingConnections == nil { + s.EnableInsecureOutgoingConnections = NewBool(false) + } + + if s.AllowedUntrustedInternalConnections == nil { + s.AllowedUntrustedInternalConnections = NewString("") + } + + if s.EnableMultifactorAuthentication == nil { + s.EnableMultifactorAuthentication = NewBool(false) + } + + if s.EnforceMultifactorAuthentication == nil { + s.EnforceMultifactorAuthentication = NewBool(false) + } + + if s.EnableUserAccessTokens == nil { + s.EnableUserAccessTokens = NewBool(false) + } + + if s.GoroutineHealthThreshold == nil { + s.GoroutineHealthThreshold = NewInt(-1) + } + + if s.ConnectionSecurity == nil { + s.ConnectionSecurity = NewString("") + } + + if s.TLSKeyFile == nil { + s.TLSKeyFile = NewString(SERVICE_SETTINGS_DEFAULT_TLS_KEY_FILE) + } + + if s.TLSCertFile == nil { + s.TLSCertFile = NewString(SERVICE_SETTINGS_DEFAULT_TLS_CERT_FILE) + } + + if s.UseLetsEncrypt == nil { + s.UseLetsEncrypt = NewBool(false) + } + + if s.LetsEncryptCertificateCacheFile == nil { + s.LetsEncryptCertificateCacheFile = NewString("./config/letsencrypt.cache") + } + + if s.ReadTimeout == nil { + s.ReadTimeout = NewInt(SERVICE_SETTINGS_DEFAULT_READ_TIMEOUT) + } + + if s.WriteTimeout == nil { + s.WriteTimeout = NewInt(SERVICE_SETTINGS_DEFAULT_WRITE_TIMEOUT) + } + + if s.MaximumLoginAttempts == nil { + s.MaximumLoginAttempts = NewInt(SERVICE_SETTINGS_DEFAULT_MAX_LOGIN_ATTEMPTS) + } + + if s.Forward80To443 == nil { + s.Forward80To443 = NewBool(false) + } + + if s.TimeBetweenUserTypingUpdatesMilliseconds == nil { + s.TimeBetweenUserTypingUpdatesMilliseconds = NewInt64(5000) + } + + if s.EnablePostSearch == nil { + s.EnablePostSearch = NewBool(true) + } + + if s.EnableUserTypingMessages == nil { + s.EnableUserTypingMessages = NewBool(true) + } + + if s.EnableChannelViewedMessages == nil { + s.EnableChannelViewedMessages = NewBool(true) + } + + if s.EnableUserStatuses == nil { + s.EnableUserStatuses = NewBool(true) + } + + if s.ClusterLogTimeoutMilliseconds == nil { + s.ClusterLogTimeoutMilliseconds = 
NewInt(2000) + } + + if s.CloseUnusedDirectMessages == nil { + s.CloseUnusedDirectMessages = NewBool(false) + } + + if s.EnableTutorial == nil { + s.EnableTutorial = NewBool(true) + } + + if s.SessionLengthWebInDays == nil { + s.SessionLengthWebInDays = NewInt(30) + } + + if s.SessionLengthMobileInDays == nil { + s.SessionLengthMobileInDays = NewInt(30) + } + + if s.SessionLengthSSOInDays == nil { + s.SessionLengthSSOInDays = NewInt(30) + } + + if s.SessionCacheInMinutes == nil { + s.SessionCacheInMinutes = NewInt(10) + } + + if s.SessionIdleTimeoutInMinutes == nil { + s.SessionIdleTimeoutInMinutes = NewInt(0) + } + + if s.EnableCommands == nil { + s.EnableCommands = NewBool(false) + } + + if s.EnableOnlyAdminIntegrations == nil { + s.EnableOnlyAdminIntegrations = NewBool(true) + } + + if s.WebsocketPort == nil { + s.WebsocketPort = NewInt(80) + } + + if s.WebsocketSecurePort == nil { + s.WebsocketSecurePort = NewInt(443) + } + + if s.AllowCorsFrom == nil { + s.AllowCorsFrom = NewString(SERVICE_SETTINGS_DEFAULT_ALLOW_CORS_FROM) + } + + if s.WebserverMode == nil { + s.WebserverMode = NewString("gzip") + } else if *s.WebserverMode == "regular" { + *s.WebserverMode = "gzip" + } + + if s.EnableCustomEmoji == nil { + s.EnableCustomEmoji = NewBool(false) + } + + if s.EnableEmojiPicker == nil { + s.EnableEmojiPicker = NewBool(true) + } + + if s.RestrictCustomEmojiCreation == nil { + s.RestrictCustomEmojiCreation = NewString(RESTRICT_EMOJI_CREATION_ALL) + } + + if s.RestrictPostDelete == nil { + s.RestrictPostDelete = NewString(PERMISSIONS_DELETE_POST_ALL) + } + + if s.AllowEditPost == nil { + s.AllowEditPost = NewString(ALLOW_EDIT_POST_ALWAYS) + } + + if s.ExperimentalEnableAuthenticationTransfer == nil { + s.ExperimentalEnableAuthenticationTransfer = NewBool(true) + } + + if s.PostEditTimeLimit == nil { + s.PostEditTimeLimit = NewInt(300) + } + + if s.EnablePreviewFeatures == nil { + s.EnablePreviewFeatures = NewBool(true) + } + + if s.ExperimentalEnableDefaultChannelLeaveJoinMessages == nil { + s.ExperimentalEnableDefaultChannelLeaveJoinMessages = NewBool(true) + } + + if s.ExperimentalGroupUnreadChannels == nil { + s.ExperimentalGroupUnreadChannels = NewString(GROUP_UNREAD_CHANNELS_DISABLED) + } else if *s.ExperimentalGroupUnreadChannels == "0" { + s.ExperimentalGroupUnreadChannels = NewString(GROUP_UNREAD_CHANNELS_DISABLED) + } else if *s.ExperimentalGroupUnreadChannels == "1" { + s.ExperimentalGroupUnreadChannels = NewString(GROUP_UNREAD_CHANNELS_DEFAULT_ON) + } + + if s.ImageProxyType == nil { + s.ImageProxyType = NewString("") + } + + if s.ImageProxyURL == nil { + s.ImageProxyURL = NewString("") + } + + if s.ImageProxyOptions == nil { + s.ImageProxyOptions = NewString("") + } +} + +type ClusterSettings struct { + Enable *bool + ClusterName *string + OverrideHostname *string + UseIpAddress *bool + UseExperimentalGossip *bool + ReadOnlyConfig *bool + GossipPort *int + StreamingPort *int +} + +func (s *ClusterSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) + } + + if s.ClusterName == nil { + s.ClusterName = NewString("") + } + + if s.OverrideHostname == nil { + s.OverrideHostname = NewString("") + } + + if s.UseIpAddress == nil { + s.UseIpAddress = NewBool(true) + } + + if s.UseExperimentalGossip == nil { + s.UseExperimentalGossip = NewBool(false) + } + + if s.ReadOnlyConfig == nil { + s.ReadOnlyConfig = NewBool(true) + } + + if s.GossipPort == nil { + s.GossipPort = NewInt(8074) + } + + if s.StreamingPort == nil { + s.StreamingPort = NewInt(8075) + } +} + +type 
MetricsSettings struct { + Enable *bool + BlockProfileRate *int + ListenAddress *string +} + +func (s *MetricsSettings) SetDefaults() { + if s.ListenAddress == nil { + s.ListenAddress = NewString(":8067") + } + + if s.Enable == nil { + s.Enable = NewBool(false) + } + + if s.BlockProfileRate == nil { + s.BlockProfileRate = NewInt(0) + } +} + +type AnalyticsSettings struct { + MaxUsersForStatistics *int +} + +func (s *AnalyticsSettings) SetDefaults() { + if s.MaxUsersForStatistics == nil { + s.MaxUsersForStatistics = NewInt(ANALYTICS_SETTINGS_DEFAULT_MAX_USERS_FOR_STATISTICS) + } +} + +type SSOSettings struct { + Enable bool + Secret string + Id string + Scope string + AuthEndpoint string + TokenEndpoint string + UserApiEndpoint string +} + +type SqlSettings struct { + DriverName *string + DataSource *string + DataSourceReplicas []string + DataSourceSearchReplicas []string + MaxIdleConns *int + MaxOpenConns *int + Trace bool + AtRestEncryptKey string + QueryTimeout *int +} + +func (s *SqlSettings) SetDefaults() { + if s.DriverName == nil { + s.DriverName = NewString(DATABASE_DRIVER_MYSQL) + } + + if s.DataSource == nil { + s.DataSource = NewString(SQL_SETTINGS_DEFAULT_DATA_SOURCE) + } + + if len(s.AtRestEncryptKey) == 0 { + s.AtRestEncryptKey = NewRandomString(32) + } + + if s.MaxIdleConns == nil { + s.MaxIdleConns = NewInt(20) + } + + if s.MaxOpenConns == nil { + s.MaxOpenConns = NewInt(300) + } + + if s.QueryTimeout == nil { + s.QueryTimeout = NewInt(30) + } +} + +type LogSettings struct { + EnableConsole bool + ConsoleLevel string + EnableFile bool + FileLevel string + FileFormat string + FileLocation string + EnableWebhookDebugging bool + EnableDiagnostics *bool +} + +func (s *LogSettings) SetDefaults() { + if s.EnableDiagnostics == nil { + s.EnableDiagnostics = NewBool(true) + } +} + +type PasswordSettings struct { + MinimumLength *int + Lowercase *bool + Number *bool + Uppercase *bool + Symbol *bool +} + +func (s *PasswordSettings) SetDefaults() { + if s.MinimumLength == nil { + s.MinimumLength = NewInt(PASSWORD_MINIMUM_LENGTH) + } + + if s.Lowercase == nil { + s.Lowercase = NewBool(false) + } + + if s.Number == nil { + s.Number = NewBool(false) + } + + if s.Uppercase == nil { + s.Uppercase = NewBool(false) + } + + if s.Symbol == nil { + s.Symbol = NewBool(false) + } +} + +type FileSettings struct { + EnableFileAttachments *bool + EnableMobileUpload *bool + EnableMobileDownload *bool + MaxFileSize *int64 + DriverName *string + Directory string + EnablePublicLink bool + PublicLinkSalt *string + InitialFont string + AmazonS3AccessKeyId string + AmazonS3SecretAccessKey string + AmazonS3Bucket string + AmazonS3Region string + AmazonS3Endpoint string + AmazonS3SSL *bool + AmazonS3SignV2 *bool + AmazonS3SSE *bool + AmazonS3Trace *bool +} + +func (s *FileSettings) SetDefaults() { + if s.DriverName == nil { + s.DriverName = NewString(IMAGE_DRIVER_LOCAL) + } + + if s.AmazonS3Endpoint == "" { + // Defaults to "s3.amazonaws.com" + s.AmazonS3Endpoint = "s3.amazonaws.com" + } + + if s.AmazonS3SSL == nil { + s.AmazonS3SSL = NewBool(true) // Secure by default. + } + + if s.AmazonS3SignV2 == nil { + s.AmazonS3SignV2 = new(bool) + // Signature v2 is not enabled by default. + } + + if s.AmazonS3SSE == nil { + s.AmazonS3SSE = NewBool(false) // Not Encrypted by default. 
+ } + + if s.AmazonS3Trace == nil { + s.AmazonS3Trace = NewBool(false) + } + + if s.EnableFileAttachments == nil { + s.EnableFileAttachments = NewBool(true) + } + + if s.EnableMobileUpload == nil { + s.EnableMobileUpload = NewBool(true) + } + + if s.EnableMobileDownload == nil { + s.EnableMobileDownload = NewBool(true) + } + + if s.MaxFileSize == nil { + s.MaxFileSize = NewInt64(52428800) // 50 MB + } + + if s.PublicLinkSalt == nil || len(*s.PublicLinkSalt) == 0 { + s.PublicLinkSalt = NewString(NewRandomString(32)) + } + + if s.InitialFont == "" { + // Defaults to "luximbi.ttf" + s.InitialFont = "luximbi.ttf" + } + + if s.Directory == "" { + s.Directory = "./data/" + } +} + +type EmailSettings struct { + EnableSignUpWithEmail bool + EnableSignInWithEmail *bool + EnableSignInWithUsername *bool + SendEmailNotifications bool + UseChannelInEmailNotifications *bool + RequireEmailVerification bool + FeedbackName string + FeedbackEmail string + FeedbackOrganization *string + EnableSMTPAuth *bool + SMTPUsername string + SMTPPassword string + SMTPServer string + SMTPPort string + ConnectionSecurity string + InviteSalt string + SendPushNotifications *bool + PushNotificationServer *string + PushNotificationContents *string + EnableEmailBatching *bool + EmailBatchingBufferSize *int + EmailBatchingInterval *int + SkipServerCertificateVerification *bool + EmailNotificationContentsType *string + LoginButtonColor *string + LoginButtonBorderColor *string + LoginButtonTextColor *string +} + +func (s *EmailSettings) SetDefaults() { + if len(s.InviteSalt) == 0 { + s.InviteSalt = NewRandomString(32) + } + + if s.EnableSignInWithEmail == nil { + s.EnableSignInWithEmail = NewBool(s.EnableSignUpWithEmail) + } + + if s.EnableSignInWithUsername == nil { + s.EnableSignInWithUsername = NewBool(false) + } + + if s.UseChannelInEmailNotifications == nil { + s.UseChannelInEmailNotifications = NewBool(false) + } + + if s.SendPushNotifications == nil { + s.SendPushNotifications = NewBool(false) + } + + if s.PushNotificationServer == nil { + s.PushNotificationServer = NewString("") + } + + if s.PushNotificationContents == nil { + s.PushNotificationContents = NewString(GENERIC_NOTIFICATION) + } + + if s.FeedbackOrganization == nil { + s.FeedbackOrganization = NewString(EMAIL_SETTINGS_DEFAULT_FEEDBACK_ORGANIZATION) + } + + if s.EnableEmailBatching == nil { + s.EnableEmailBatching = NewBool(false) + } + + if s.EmailBatchingBufferSize == nil { + s.EmailBatchingBufferSize = NewInt(EMAIL_BATCHING_BUFFER_SIZE) + } + + if s.EmailBatchingInterval == nil { + s.EmailBatchingInterval = NewInt(EMAIL_BATCHING_INTERVAL) + } + + if s.EnableSMTPAuth == nil { + s.EnableSMTPAuth = new(bool) + if s.ConnectionSecurity == CONN_SECURITY_NONE { + *s.EnableSMTPAuth = false + } else { + *s.EnableSMTPAuth = true + } + } + + if s.ConnectionSecurity == CONN_SECURITY_PLAIN { + s.ConnectionSecurity = CONN_SECURITY_NONE + } + + if s.SkipServerCertificateVerification == nil { + s.SkipServerCertificateVerification = NewBool(false) + } + + if s.EmailNotificationContentsType == nil { + s.EmailNotificationContentsType = NewString(EMAIL_NOTIFICATION_CONTENTS_FULL) + } + + if s.LoginButtonColor == nil { + s.LoginButtonColor = NewString("#0000") + } + + if s.LoginButtonBorderColor == nil { + s.LoginButtonBorderColor = NewString("#2389D7") + } + + if s.LoginButtonTextColor == nil { + s.LoginButtonTextColor = NewString("#2389D7") + } +} + +type RateLimitSettings struct { + Enable *bool + PerSec *int + MaxBurst *int + MemoryStoreSize *int + VaryByRemoteAddr *bool + 
VaryByUser *bool + VaryByHeader string +} + +func (s *RateLimitSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) + } + + if s.PerSec == nil { + s.PerSec = NewInt(10) + } + + if s.MaxBurst == nil { + s.MaxBurst = NewInt(100) + } + + if s.MemoryStoreSize == nil { + s.MemoryStoreSize = NewInt(10000) + } + + if s.VaryByRemoteAddr == nil { + s.VaryByRemoteAddr = NewBool(true) + } + + if s.VaryByUser == nil { + s.VaryByUser = NewBool(false) + } +} + +type PrivacySettings struct { + ShowEmailAddress bool + ShowFullName bool +} + +type SupportSettings struct { + TermsOfServiceLink *string + PrivacyPolicyLink *string + AboutLink *string + HelpLink *string + ReportAProblemLink *string + SupportEmail *string +} + +func (s *SupportSettings) SetDefaults() { + if !IsSafeLink(s.TermsOfServiceLink) { + *s.TermsOfServiceLink = SUPPORT_SETTINGS_DEFAULT_TERMS_OF_SERVICE_LINK + } + + if s.TermsOfServiceLink == nil { + s.TermsOfServiceLink = NewString(SUPPORT_SETTINGS_DEFAULT_TERMS_OF_SERVICE_LINK) + } + + if !IsSafeLink(s.PrivacyPolicyLink) { + *s.PrivacyPolicyLink = "" + } + + if s.PrivacyPolicyLink == nil { + s.PrivacyPolicyLink = NewString(SUPPORT_SETTINGS_DEFAULT_PRIVACY_POLICY_LINK) + } + + if !IsSafeLink(s.AboutLink) { + *s.AboutLink = "" + } + + if s.AboutLink == nil { + s.AboutLink = NewString(SUPPORT_SETTINGS_DEFAULT_ABOUT_LINK) + } + + if !IsSafeLink(s.HelpLink) { + *s.HelpLink = "" + } + + if s.HelpLink == nil { + s.HelpLink = NewString(SUPPORT_SETTINGS_DEFAULT_HELP_LINK) + } + + if !IsSafeLink(s.ReportAProblemLink) { + *s.ReportAProblemLink = "" + } + + if s.ReportAProblemLink == nil { + s.ReportAProblemLink = NewString(SUPPORT_SETTINGS_DEFAULT_REPORT_A_PROBLEM_LINK) + } + + if s.SupportEmail == nil { + s.SupportEmail = NewString(SUPPORT_SETTINGS_DEFAULT_SUPPORT_EMAIL) + } +} + +type AnnouncementSettings struct { + EnableBanner *bool + BannerText *string + BannerColor *string + BannerTextColor *string + AllowBannerDismissal *bool +} + +func (s *AnnouncementSettings) SetDefaults() { + if s.EnableBanner == nil { + s.EnableBanner = NewBool(false) + } + + if s.BannerText == nil { + s.BannerText = NewString("") + } + + if s.BannerColor == nil { + s.BannerColor = NewString(ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_COLOR) + } + + if s.BannerTextColor == nil { + s.BannerTextColor = NewString(ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_TEXT_COLOR) + } + + if s.AllowBannerDismissal == nil { + s.AllowBannerDismissal = NewBool(true) + } +} + +type ThemeSettings struct { + EnableThemeSelection *bool + DefaultTheme *string + AllowCustomThemes *bool + AllowedThemes []string +} + +func (s *ThemeSettings) SetDefaults() { + if s.EnableThemeSelection == nil { + s.EnableThemeSelection = NewBool(true) + } + + if s.DefaultTheme == nil { + s.DefaultTheme = NewString(TEAM_SETTINGS_DEFAULT_TEAM_TEXT) + } + + if s.AllowCustomThemes == nil { + s.AllowCustomThemes = NewBool(true) + } + + if s.AllowedThemes == nil { + s.AllowedThemes = []string{} + } +} + +type TeamSettings struct { + SiteName string + MaxUsersPerTeam *int + EnableTeamCreation bool + EnableUserCreation bool + EnableOpenServer *bool + RestrictCreationToDomains string + EnableCustomBrand *bool + CustomBrandText *string + CustomDescriptionText *string + RestrictDirectMessage *string + RestrictTeamInvite *string + RestrictPublicChannelManagement *string + RestrictPrivateChannelManagement *string + RestrictPublicChannelCreation *string + RestrictPrivateChannelCreation *string + RestrictPublicChannelDeletion *string + RestrictPrivateChannelDeletion 
*string + RestrictPrivateChannelManageMembers *string + EnableXToLeaveChannelsFromLHS *bool + UserStatusAwayTimeout *int64 + MaxChannelsPerTeam *int64 + MaxNotificationsPerChannel *int64 + EnableConfirmNotificationsToChannel *bool + TeammateNameDisplay *string + ExperimentalTownSquareIsReadOnly *bool + ExperimentalPrimaryTeam *string +} + +func (s *TeamSettings) SetDefaults() { + if s.MaxUsersPerTeam == nil { + s.MaxUsersPerTeam = NewInt(TEAM_SETTINGS_DEFAULT_MAX_USERS_PER_TEAM) + } + + if s.EnableCustomBrand == nil { + s.EnableCustomBrand = NewBool(false) + } + + if s.CustomBrandText == nil { + s.CustomBrandText = NewString(TEAM_SETTINGS_DEFAULT_CUSTOM_BRAND_TEXT) + } + + if s.CustomDescriptionText == nil { + s.CustomDescriptionText = NewString(TEAM_SETTINGS_DEFAULT_CUSTOM_DESCRIPTION_TEXT) + } + + if s.EnableOpenServer == nil { + s.EnableOpenServer = NewBool(false) + } + + if s.RestrictDirectMessage == nil { + s.RestrictDirectMessage = NewString(DIRECT_MESSAGE_ANY) + } + + if s.RestrictTeamInvite == nil { + s.RestrictTeamInvite = NewString(PERMISSIONS_ALL) + } + + if s.RestrictPublicChannelManagement == nil { + s.RestrictPublicChannelManagement = NewString(PERMISSIONS_ALL) + } + + if s.RestrictPrivateChannelManagement == nil { + s.RestrictPrivateChannelManagement = NewString(PERMISSIONS_ALL) + } + + if s.RestrictPublicChannelCreation == nil { + s.RestrictPublicChannelCreation = new(string) + // If this setting does not exist, assume migration from <3.6, so use management setting as default. + if *s.RestrictPublicChannelManagement == PERMISSIONS_CHANNEL_ADMIN { + *s.RestrictPublicChannelCreation = PERMISSIONS_TEAM_ADMIN + } else { + *s.RestrictPublicChannelCreation = *s.RestrictPublicChannelManagement + } + } + + if s.RestrictPrivateChannelCreation == nil { + // If this setting does not exist, assume migration from <3.6, so use management setting as default. + if *s.RestrictPrivateChannelManagement == PERMISSIONS_CHANNEL_ADMIN { + s.RestrictPrivateChannelCreation = NewString(PERMISSIONS_TEAM_ADMIN) + } else { + s.RestrictPrivateChannelCreation = NewString(*s.RestrictPrivateChannelManagement) + } + } + + if s.RestrictPublicChannelDeletion == nil { + // If this setting does not exist, assume migration from <3.6, so use management setting as default. + s.RestrictPublicChannelDeletion = NewString(*s.RestrictPublicChannelManagement) + } + + if s.RestrictPrivateChannelDeletion == nil { + // If this setting does not exist, assume migration from <3.6, so use management setting as default. 
+ s.RestrictPrivateChannelDeletion = NewString(*s.RestrictPrivateChannelManagement) + } + + if s.RestrictPrivateChannelManageMembers == nil { + s.RestrictPrivateChannelManageMembers = NewString(PERMISSIONS_ALL) + } + + if s.EnableXToLeaveChannelsFromLHS == nil { + s.EnableXToLeaveChannelsFromLHS = NewBool(false) + } + + if s.UserStatusAwayTimeout == nil { + s.UserStatusAwayTimeout = NewInt64(TEAM_SETTINGS_DEFAULT_USER_STATUS_AWAY_TIMEOUT) + } + + if s.MaxChannelsPerTeam == nil { + s.MaxChannelsPerTeam = NewInt64(2000) + } + + if s.MaxNotificationsPerChannel == nil { + s.MaxNotificationsPerChannel = NewInt64(1000) + } + + if s.EnableConfirmNotificationsToChannel == nil { + s.EnableConfirmNotificationsToChannel = NewBool(true) + } + + if s.ExperimentalTownSquareIsReadOnly == nil { + s.ExperimentalTownSquareIsReadOnly = NewBool(false) + } + + if s.ExperimentalPrimaryTeam == nil { + s.ExperimentalPrimaryTeam = NewString("") + } +} + +type ClientRequirements struct { + AndroidLatestVersion string + AndroidMinVersion string + DesktopLatestVersion string + DesktopMinVersion string + IosLatestVersion string + IosMinVersion string +} + +type LdapSettings struct { + // Basic + Enable *bool + EnableSync *bool + LdapServer *string + LdapPort *int + ConnectionSecurity *string + BaseDN *string + BindUsername *string + BindPassword *string + + // Filtering + UserFilter *string + + // User Mapping + FirstNameAttribute *string + LastNameAttribute *string + EmailAttribute *string + UsernameAttribute *string + NicknameAttribute *string + IdAttribute *string + PositionAttribute *string + + // Syncronization + SyncIntervalMinutes *int + + // Advanced + SkipCertificateVerification *bool + QueryTimeout *int + MaxPageSize *int + + // Customization + LoginFieldName *string + + LoginButtonColor *string + LoginButtonBorderColor *string + LoginButtonTextColor *string +} + +func (s *LdapSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) + } + + // When unset should default to LDAP Enabled + if s.EnableSync == nil { + s.EnableSync = NewBool(*s.Enable) + } + + if s.LdapServer == nil { + s.LdapServer = NewString("") + } + + if s.LdapPort == nil { + s.LdapPort = NewInt(389) + } + + if s.ConnectionSecurity == nil { + s.ConnectionSecurity = NewString("") + } + + if s.BaseDN == nil { + s.BaseDN = NewString("") + } + + if s.BindUsername == nil { + s.BindUsername = NewString("") + } + + if s.BindPassword == nil { + s.BindPassword = NewString("") + } + + if s.UserFilter == nil { + s.UserFilter = NewString("") + } + + if s.FirstNameAttribute == nil { + s.FirstNameAttribute = NewString(LDAP_SETTINGS_DEFAULT_FIRST_NAME_ATTRIBUTE) + } + + if s.LastNameAttribute == nil { + s.LastNameAttribute = NewString(LDAP_SETTINGS_DEFAULT_LAST_NAME_ATTRIBUTE) + } + + if s.EmailAttribute == nil { + s.EmailAttribute = NewString(LDAP_SETTINGS_DEFAULT_EMAIL_ATTRIBUTE) + } + + if s.UsernameAttribute == nil { + s.UsernameAttribute = NewString(LDAP_SETTINGS_DEFAULT_USERNAME_ATTRIBUTE) + } + + if s.NicknameAttribute == nil { + s.NicknameAttribute = NewString(LDAP_SETTINGS_DEFAULT_NICKNAME_ATTRIBUTE) + } + + if s.IdAttribute == nil { + s.IdAttribute = NewString(LDAP_SETTINGS_DEFAULT_ID_ATTRIBUTE) + } + + if s.PositionAttribute == nil { + s.PositionAttribute = NewString(LDAP_SETTINGS_DEFAULT_POSITION_ATTRIBUTE) + } + + if s.SyncIntervalMinutes == nil { + s.SyncIntervalMinutes = NewInt(60) + } + + if s.SkipCertificateVerification == nil { + s.SkipCertificateVerification = NewBool(false) + } + + if s.QueryTimeout == nil { + 
s.QueryTimeout = NewInt(60) + } + + if s.MaxPageSize == nil { + s.MaxPageSize = NewInt(0) + } + + if s.LoginFieldName == nil { + s.LoginFieldName = NewString(LDAP_SETTINGS_DEFAULT_LOGIN_FIELD_NAME) + } + + if s.LoginButtonColor == nil { + s.LoginButtonColor = NewString("#0000") + } + + if s.LoginButtonBorderColor == nil { + s.LoginButtonBorderColor = NewString("#2389D7") + } + + if s.LoginButtonTextColor == nil { + s.LoginButtonTextColor = NewString("#2389D7") + } +} + +type ComplianceSettings struct { + Enable *bool + Directory *string + EnableDaily *bool +} + +func (s *ComplianceSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) + } + + if s.Directory == nil { + s.Directory = NewString("./data/") + } + + if s.EnableDaily == nil { + s.EnableDaily = NewBool(false) + } +} + +type LocalizationSettings struct { + DefaultServerLocale *string + DefaultClientLocale *string + AvailableLocales *string +} + +func (s *LocalizationSettings) SetDefaults() { + if s.DefaultServerLocale == nil { + s.DefaultServerLocale = NewString(DEFAULT_LOCALE) + } + + if s.DefaultClientLocale == nil { + s.DefaultClientLocale = NewString(DEFAULT_LOCALE) + } + + if s.AvailableLocales == nil { + s.AvailableLocales = NewString("") + } +} + +type SamlSettings struct { + // Basic + Enable *bool + EnableSyncWithLdap *bool + + Verify *bool + Encrypt *bool + + IdpUrl *string + IdpDescriptorUrl *string + AssertionConsumerServiceURL *string + + IdpCertificateFile *string + PublicCertificateFile *string + PrivateKeyFile *string + + // User Mapping + FirstNameAttribute *string + LastNameAttribute *string + EmailAttribute *string + UsernameAttribute *string + NicknameAttribute *string + LocaleAttribute *string + PositionAttribute *string + + LoginButtonText *string + + LoginButtonColor *string + LoginButtonBorderColor *string + LoginButtonTextColor *string +} + +func (s *SamlSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) + } + + if s.EnableSyncWithLdap == nil { + s.EnableSyncWithLdap = NewBool(false) + } + + if s.Verify == nil { + s.Verify = NewBool(true) + } + + if s.Encrypt == nil { + s.Encrypt = NewBool(true) + } + + if s.IdpUrl == nil { + s.IdpUrl = NewString("") + } + + if s.IdpDescriptorUrl == nil { + s.IdpDescriptorUrl = NewString("") + } + + if s.IdpCertificateFile == nil { + s.IdpCertificateFile = NewString("") + } + + if s.PublicCertificateFile == nil { + s.PublicCertificateFile = NewString("") + } + + if s.PrivateKeyFile == nil { + s.PrivateKeyFile = NewString("") + } + + if s.AssertionConsumerServiceURL == nil { + s.AssertionConsumerServiceURL = NewString("") + } + + if s.LoginButtonText == nil || *s.LoginButtonText == "" { + s.LoginButtonText = NewString(USER_AUTH_SERVICE_SAML_TEXT) + } + + if s.FirstNameAttribute == nil { + s.FirstNameAttribute = NewString(SAML_SETTINGS_DEFAULT_FIRST_NAME_ATTRIBUTE) + } + + if s.LastNameAttribute == nil { + s.LastNameAttribute = NewString(SAML_SETTINGS_DEFAULT_LAST_NAME_ATTRIBUTE) + } + + if s.EmailAttribute == nil { + s.EmailAttribute = NewString(SAML_SETTINGS_DEFAULT_EMAIL_ATTRIBUTE) + } + + if s.UsernameAttribute == nil { + s.UsernameAttribute = NewString(SAML_SETTINGS_DEFAULT_USERNAME_ATTRIBUTE) + } + + if s.NicknameAttribute == nil { + s.NicknameAttribute = NewString(SAML_SETTINGS_DEFAULT_NICKNAME_ATTRIBUTE) + } + + if s.PositionAttribute == nil { + s.PositionAttribute = NewString(SAML_SETTINGS_DEFAULT_POSITION_ATTRIBUTE) + } + + if s.LocaleAttribute == nil { + s.LocaleAttribute = 
NewString(SAML_SETTINGS_DEFAULT_LOCALE_ATTRIBUTE) + } + + if s.LoginButtonColor == nil { + s.LoginButtonColor = NewString("#34a28b") + } + + if s.LoginButtonBorderColor == nil { + s.LoginButtonBorderColor = NewString("#2389D7") + } + + if s.LoginButtonTextColor == nil { + s.LoginButtonTextColor = NewString("#ffffff") + } +} + +type NativeAppSettings struct { + AppDownloadLink *string + AndroidAppDownloadLink *string + IosAppDownloadLink *string +} + +func (s *NativeAppSettings) SetDefaults() { + if s.AppDownloadLink == nil { + s.AppDownloadLink = NewString(NATIVEAPP_SETTINGS_DEFAULT_APP_DOWNLOAD_LINK) + } + + if s.AndroidAppDownloadLink == nil { + s.AndroidAppDownloadLink = NewString(NATIVEAPP_SETTINGS_DEFAULT_ANDROID_APP_DOWNLOAD_LINK) + } + + if s.IosAppDownloadLink == nil { + s.IosAppDownloadLink = NewString(NATIVEAPP_SETTINGS_DEFAULT_IOS_APP_DOWNLOAD_LINK) + } +} + +type WebrtcSettings struct { + Enable *bool + GatewayWebsocketUrl *string + GatewayAdminUrl *string + GatewayAdminSecret *string + StunURI *string + TurnURI *string + TurnUsername *string + TurnSharedKey *string +} + +func (s *WebrtcSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) + } + + if s.GatewayWebsocketUrl == nil { + s.GatewayWebsocketUrl = NewString("") + } + + if s.GatewayAdminUrl == nil { + s.GatewayAdminUrl = NewString("") + } + + if s.GatewayAdminSecret == nil { + s.GatewayAdminSecret = NewString("") + } + + if s.StunURI == nil { + s.StunURI = NewString(WEBRTC_SETTINGS_DEFAULT_STUN_URI) + } + + if s.TurnURI == nil { + s.TurnURI = NewString(WEBRTC_SETTINGS_DEFAULT_TURN_URI) + } + + if s.TurnUsername == nil { + s.TurnUsername = NewString("") + } + + if s.TurnSharedKey == nil { + s.TurnSharedKey = NewString("") + } +} + +type ElasticsearchSettings struct { + ConnectionUrl *string + Username *string + Password *string + EnableIndexing *bool + EnableSearching *bool + Sniff *bool + PostIndexReplicas *int + PostIndexShards *int + AggregatePostsAfterDays *int + PostsAggregatorJobStartTime *string + IndexPrefix *string + LiveIndexingBatchSize *int + BulkIndexingTimeWindowSeconds *int + RequestTimeoutSeconds *int +} + +func (s *ElasticsearchSettings) SetDefaults() { + if s.ConnectionUrl == nil { + s.ConnectionUrl = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_CONNECTION_URL) + } + + if s.Username == nil { + s.Username = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_USERNAME) + } + + if s.Password == nil { + s.Password = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_PASSWORD) + } + + if s.EnableIndexing == nil { + s.EnableIndexing = NewBool(false) + } + + if s.EnableSearching == nil { + s.EnableSearching = NewBool(false) + } + + if s.Sniff == nil { + s.Sniff = NewBool(true) + } + + if s.PostIndexReplicas == nil { + s.PostIndexReplicas = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_REPLICAS) + } + + if s.PostIndexShards == nil { + s.PostIndexShards = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_SHARDS) + } + + if s.AggregatePostsAfterDays == nil { + s.AggregatePostsAfterDays = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_AGGREGATE_POSTS_AFTER_DAYS) + } + + if s.PostsAggregatorJobStartTime == nil { + s.PostsAggregatorJobStartTime = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_POSTS_AGGREGATOR_JOB_START_TIME) + } + + if s.IndexPrefix == nil { + s.IndexPrefix = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_INDEX_PREFIX) + } + + if s.LiveIndexingBatchSize == nil { + s.LiveIndexingBatchSize = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_LIVE_INDEXING_BATCH_SIZE) + } + + if s.BulkIndexingTimeWindowSeconds == nil { + 
s.BulkIndexingTimeWindowSeconds = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_BULK_INDEXING_TIME_WINDOW_SECONDS) + } + + if s.RequestTimeoutSeconds == nil { + s.RequestTimeoutSeconds = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_REQUEST_TIMEOUT_SECONDS) + } +} + +type DataRetentionSettings struct { + EnableMessageDeletion *bool + EnableFileDeletion *bool + MessageRetentionDays *int + FileRetentionDays *int + DeletionJobStartTime *string +} + +func (s *DataRetentionSettings) SetDefaults() { + if s.EnableMessageDeletion == nil { + s.EnableMessageDeletion = NewBool(false) + } + + if s.EnableFileDeletion == nil { + s.EnableFileDeletion = NewBool(false) + } + + if s.MessageRetentionDays == nil { + s.MessageRetentionDays = NewInt(DATA_RETENTION_SETTINGS_DEFAULT_MESSAGE_RETENTION_DAYS) + } + + if s.FileRetentionDays == nil { + s.FileRetentionDays = NewInt(DATA_RETENTION_SETTINGS_DEFAULT_FILE_RETENTION_DAYS) + } + + if s.DeletionJobStartTime == nil { + s.DeletionJobStartTime = NewString(DATA_RETENTION_SETTINGS_DEFAULT_DELETION_JOB_START_TIME) + } +} + +type JobSettings struct { + RunJobs *bool + RunScheduler *bool +} + +func (s *JobSettings) SetDefaults() { + if s.RunJobs == nil { + s.RunJobs = NewBool(true) + } + + if s.RunScheduler == nil { + s.RunScheduler = NewBool(true) + } +} + +type PluginState struct { + Enable bool +} + +type PluginSettings struct { + Enable *bool + EnableUploads *bool + Directory *string + ClientDirectory *string + Plugins map[string]interface{} + PluginStates map[string]*PluginState +} + +func (s *PluginSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(true) + } + + if s.EnableUploads == nil { + s.EnableUploads = NewBool(false) + } + + if s.Directory == nil { + s.Directory = NewString(PLUGIN_SETTINGS_DEFAULT_DIRECTORY) + } + + if *s.Directory == "" { + *s.Directory = PLUGIN_SETTINGS_DEFAULT_DIRECTORY + } + + if s.ClientDirectory == nil { + s.ClientDirectory = NewString(PLUGIN_SETTINGS_DEFAULT_CLIENT_DIRECTORY) + } + + if *s.ClientDirectory == "" { + *s.ClientDirectory = PLUGIN_SETTINGS_DEFAULT_CLIENT_DIRECTORY + } + + if s.Plugins == nil { + s.Plugins = make(map[string]interface{}) + } + + if s.PluginStates == nil { + s.PluginStates = make(map[string]*PluginState) + } +} + +type MessageExportSettings struct { + EnableExport *bool + ExportFormat *string + DailyRunTime *string + ExportFromTimestamp *int64 + BatchSize *int + + // formatter-specific settings - these are only expected to be non-nil if ExportFormat is set to the associated format + GlobalRelayEmailAddress *string +} + +func (s *MessageExportSettings) SetDefaults() { + if s.EnableExport == nil { + s.EnableExport = NewBool(false) + } + + if s.ExportFormat == nil { + s.ExportFormat = NewString(COMPLIANCE_EXPORT_TYPE_ACTIANCE) + } + + if s.DailyRunTime == nil { + s.DailyRunTime = NewString("01:00") + } + + if s.ExportFromTimestamp == nil { + s.ExportFromTimestamp = NewInt64(0) + } + + if s.EnableExport != nil && *s.EnableExport && *s.ExportFromTimestamp == int64(0) { + // when the feature is enabled via the System Console, use the current timestamp as the start time for future exports + s.ExportFromTimestamp = NewInt64(GetMillis()) + } else if s.EnableExport != nil && !*s.EnableExport { + // when the feature is disabled, reset the timestamp so that the timestamp will be set if the feature is re-enabled + s.ExportFromTimestamp = NewInt64(0) + } + + if s.BatchSize == nil { + s.BatchSize = NewInt(10000) + } +} + +type ConfigFunc func() *Config + +type Config struct { + ServiceSettings ServiceSettings + 
TeamSettings TeamSettings + ClientRequirements ClientRequirements + SqlSettings SqlSettings + LogSettings LogSettings + PasswordSettings PasswordSettings + FileSettings FileSettings + EmailSettings EmailSettings + RateLimitSettings RateLimitSettings + PrivacySettings PrivacySettings + SupportSettings SupportSettings + AnnouncementSettings AnnouncementSettings + ThemeSettings ThemeSettings + GitLabSettings SSOSettings + GoogleSettings SSOSettings + Office365Settings SSOSettings + LdapSettings LdapSettings + ComplianceSettings ComplianceSettings + LocalizationSettings LocalizationSettings + SamlSettings SamlSettings + NativeAppSettings NativeAppSettings + ClusterSettings ClusterSettings + MetricsSettings MetricsSettings + AnalyticsSettings AnalyticsSettings + WebrtcSettings WebrtcSettings + ElasticsearchSettings ElasticsearchSettings + DataRetentionSettings DataRetentionSettings + MessageExportSettings MessageExportSettings + JobSettings JobSettings + PluginSettings PluginSettings +} + +func (o *Config) Clone() *Config { + var ret Config + if err := json.Unmarshal([]byte(o.ToJson()), &ret); err != nil { + panic(err) + } + return &ret +} + +func (o *Config) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func (o *Config) GetSSOService(service string) *SSOSettings { + switch service { + case SERVICE_GITLAB: + return &o.GitLabSettings + case SERVICE_GOOGLE: + return &o.GoogleSettings + case SERVICE_OFFICE365: + return &o.Office365Settings + } + + return nil +} + +func ConfigFromJson(data io.Reader) *Config { + var o *Config + json.NewDecoder(data).Decode(&o) + return o +} + +func (o *Config) SetDefaults() { + o.LdapSettings.SetDefaults() + o.SamlSettings.SetDefaults() + + if o.TeamSettings.TeammateNameDisplay == nil { + o.TeamSettings.TeammateNameDisplay = NewString(SHOW_USERNAME) + + if *o.SamlSettings.Enable || *o.LdapSettings.Enable { + *o.TeamSettings.TeammateNameDisplay = SHOW_FULLNAME + } + } + + o.SqlSettings.SetDefaults() + o.FileSettings.SetDefaults() + o.EmailSettings.SetDefaults() + o.ServiceSettings.SetDefaults() + o.PasswordSettings.SetDefaults() + o.TeamSettings.SetDefaults() + o.MetricsSettings.SetDefaults() + o.SupportSettings.SetDefaults() + o.AnnouncementSettings.SetDefaults() + o.ThemeSettings.SetDefaults() + o.ClusterSettings.SetDefaults() + o.PluginSettings.SetDefaults() + o.AnalyticsSettings.SetDefaults() + o.ComplianceSettings.SetDefaults() + o.LocalizationSettings.SetDefaults() + o.ElasticsearchSettings.SetDefaults() + o.NativeAppSettings.SetDefaults() + o.DataRetentionSettings.SetDefaults() + o.RateLimitSettings.SetDefaults() + o.LogSettings.SetDefaults() + o.JobSettings.SetDefaults() + o.WebrtcSettings.SetDefaults() + o.MessageExportSettings.SetDefaults() +} + +func (o *Config) IsValid() *AppError { + if len(*o.ServiceSettings.SiteURL) == 0 && *o.EmailSettings.EnableEmailBatching { + return NewAppError("Config.IsValid", "model.config.is_valid.site_url_email_batching.app_error", nil, "", http.StatusBadRequest) + } + + if *o.ClusterSettings.Enable && *o.EmailSettings.EnableEmailBatching { + return NewAppError("Config.IsValid", "model.config.is_valid.cluster_email_batching.app_error", nil, "", http.StatusBadRequest) + } + + if err := o.TeamSettings.isValid(); err != nil { + return err + } + + if err := o.SqlSettings.isValid(); err != nil { + return err + } + + if err := o.FileSettings.isValid(); err != nil { + return err + } + + if err := o.EmailSettings.isValid(); err != nil { + return err + } + + if err := o.LdapSettings.isValid(); err != nil { + 
return err + } + + if err := o.SamlSettings.isValid(); err != nil { + return err + } + + if *o.PasswordSettings.MinimumLength < PASSWORD_MINIMUM_LENGTH || *o.PasswordSettings.MinimumLength > PASSWORD_MAXIMUM_LENGTH { + return NewAppError("Config.IsValid", "model.config.is_valid.password_length.app_error", map[string]interface{}{"MinLength": PASSWORD_MINIMUM_LENGTH, "MaxLength": PASSWORD_MAXIMUM_LENGTH}, "", http.StatusBadRequest) + } + + if err := o.RateLimitSettings.isValid(); err != nil { + return err + } + + if err := o.WebrtcSettings.isValid(); err != nil { + return err + } + + if err := o.ServiceSettings.isValid(); err != nil { + return err + } + + if err := o.ElasticsearchSettings.isValid(); err != nil { + return err + } + + if err := o.DataRetentionSettings.isValid(); err != nil { + return err + } + + if err := o.LocalizationSettings.isValid(); err != nil { + return err + } + + if err := o.MessageExportSettings.isValid(o.FileSettings); err != nil { + return err + } + + return nil +} + +func (ts *TeamSettings) isValid() *AppError { + if *ts.MaxUsersPerTeam <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.max_users.app_error", nil, "", http.StatusBadRequest) + } + + if *ts.MaxChannelsPerTeam <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.max_channels.app_error", nil, "", http.StatusBadRequest) + } + + if *ts.MaxNotificationsPerChannel <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.max_notify_per_channel.app_error", nil, "", http.StatusBadRequest) + } + + if !(*ts.RestrictDirectMessage == DIRECT_MESSAGE_ANY || *ts.RestrictDirectMessage == DIRECT_MESSAGE_TEAM) { + return NewAppError("Config.IsValid", "model.config.is_valid.restrict_direct_message.app_error", nil, "", http.StatusBadRequest) + } + + if !(*ts.TeammateNameDisplay == SHOW_FULLNAME || *ts.TeammateNameDisplay == SHOW_NICKNAME_FULLNAME || *ts.TeammateNameDisplay == SHOW_USERNAME) { + return NewAppError("Config.IsValid", "model.config.is_valid.teammate_name_display.app_error", nil, "", http.StatusBadRequest) + } + + if len(ts.SiteName) > SITENAME_MAX_LENGTH { + return NewAppError("Config.IsValid", "model.config.is_valid.sitename_length.app_error", map[string]interface{}{"MaxLength": SITENAME_MAX_LENGTH}, "", http.StatusBadRequest) + } + + return nil +} + +func (ss *SqlSettings) isValid() *AppError { + if len(ss.AtRestEncryptKey) < 32 { + return NewAppError("Config.IsValid", "model.config.is_valid.encrypt_sql.app_error", nil, "", http.StatusBadRequest) + } + + if !(*ss.DriverName == DATABASE_DRIVER_MYSQL || *ss.DriverName == DATABASE_DRIVER_POSTGRES) { + return NewAppError("Config.IsValid", "model.config.is_valid.sql_driver.app_error", nil, "", http.StatusBadRequest) + } + + if *ss.MaxIdleConns <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.sql_idle.app_error", nil, "", http.StatusBadRequest) + } + + if *ss.QueryTimeout <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.sql_query_timeout.app_error", nil, "", http.StatusBadRequest) + } + + if len(*ss.DataSource) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.sql_data_src.app_error", nil, "", http.StatusBadRequest) + } + + if *ss.MaxOpenConns <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.sql_max_conn.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (fs *FileSettings) isValid() *AppError { + if *fs.MaxFileSize <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.max_file_size.app_error", nil, "", 
http.StatusBadRequest) + } + + if !(*fs.DriverName == IMAGE_DRIVER_LOCAL || *fs.DriverName == IMAGE_DRIVER_S3) { + return NewAppError("Config.IsValid", "model.config.is_valid.file_driver.app_error", nil, "", http.StatusBadRequest) + } + + if len(*fs.PublicLinkSalt) < 32 { + return NewAppError("Config.IsValid", "model.config.is_valid.file_salt.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (es *EmailSettings) isValid() *AppError { + if !(es.ConnectionSecurity == CONN_SECURITY_NONE || es.ConnectionSecurity == CONN_SECURITY_TLS || es.ConnectionSecurity == CONN_SECURITY_STARTTLS || es.ConnectionSecurity == CONN_SECURITY_PLAIN) { + return NewAppError("Config.IsValid", "model.config.is_valid.email_security.app_error", nil, "", http.StatusBadRequest) + } + + if len(es.InviteSalt) < 32 { + return NewAppError("Config.IsValid", "model.config.is_valid.email_salt.app_error", nil, "", http.StatusBadRequest) + } + + if *es.EmailBatchingBufferSize <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.email_batching_buffer_size.app_error", nil, "", http.StatusBadRequest) + } + + if *es.EmailBatchingInterval < 30 { + return NewAppError("Config.IsValid", "model.config.is_valid.email_batching_interval.app_error", nil, "", http.StatusBadRequest) + } + + if !(*es.EmailNotificationContentsType == EMAIL_NOTIFICATION_CONTENTS_FULL || *es.EmailNotificationContentsType == EMAIL_NOTIFICATION_CONTENTS_GENERIC) { + return NewAppError("Config.IsValid", "model.config.is_valid.email_notification_contents_type.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (rls *RateLimitSettings) isValid() *AppError { + if *rls.MemoryStoreSize <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.rate_mem.app_error", nil, "", http.StatusBadRequest) + } + + if *rls.PerSec <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.rate_sec.app_error", nil, "", http.StatusBadRequest) + } + + if *rls.MaxBurst <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.max_burst.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (ls *LdapSettings) isValid() *AppError { + if !(*ls.ConnectionSecurity == CONN_SECURITY_NONE || *ls.ConnectionSecurity == CONN_SECURITY_TLS || *ls.ConnectionSecurity == CONN_SECURITY_STARTTLS) { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_security.app_error", nil, "", http.StatusBadRequest) + } + + if *ls.SyncIntervalMinutes <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_sync_interval.app_error", nil, "", http.StatusBadRequest) + } + + if *ls.MaxPageSize < 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_max_page_size.app_error", nil, "", http.StatusBadRequest) + } + + if *ls.Enable { + if *ls.LdapServer == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_server", nil, "", http.StatusBadRequest) + } + + if *ls.BaseDN == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_basedn", nil, "", http.StatusBadRequest) + } + + if *ls.EmailAttribute == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_email", nil, "", http.StatusBadRequest) + } + + if *ls.UsernameAttribute == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_username", nil, "", http.StatusBadRequest) + } + + if *ls.IdAttribute == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_id", nil, "", http.StatusBadRequest) + } + } + + return nil +} + +func (ss 
*SamlSettings) isValid() *AppError { + if *ss.Enable { + if len(*ss.IdpUrl) == 0 || !IsValidHttpUrl(*ss.IdpUrl) { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_idp_url.app_error", nil, "", http.StatusBadRequest) + } + + if len(*ss.IdpDescriptorUrl) == 0 || !IsValidHttpUrl(*ss.IdpDescriptorUrl) { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_idp_descriptor_url.app_error", nil, "", http.StatusBadRequest) + } + + if len(*ss.IdpCertificateFile) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_idp_cert.app_error", nil, "", http.StatusBadRequest) + } + + if len(*ss.EmailAttribute) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_email_attribute.app_error", nil, "", http.StatusBadRequest) + } + + if len(*ss.UsernameAttribute) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_username_attribute.app_error", nil, "", http.StatusBadRequest) + } + + if *ss.Verify { + if len(*ss.AssertionConsumerServiceURL) == 0 || !IsValidHttpUrl(*ss.AssertionConsumerServiceURL) { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_assertion_consumer_service_url.app_error", nil, "", http.StatusBadRequest) + } + } + + if *ss.Encrypt { + if len(*ss.PrivateKeyFile) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_private_key.app_error", nil, "", http.StatusBadRequest) + } + + if len(*ss.PublicCertificateFile) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_public_cert.app_error", nil, "", http.StatusBadRequest) + } + } + + if len(*ss.EmailAttribute) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_email_attribute.app_error", nil, "", http.StatusBadRequest) + } + } + + return nil +} + +func (ws *WebrtcSettings) isValid() *AppError { + if *ws.Enable { + if len(*ws.GatewayWebsocketUrl) == 0 || !IsValidWebsocketUrl(*ws.GatewayWebsocketUrl) { + return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_gateway_ws_url.app_error", nil, "", http.StatusBadRequest) + } else if len(*ws.GatewayAdminUrl) == 0 || !IsValidHttpUrl(*ws.GatewayAdminUrl) { + return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_gateway_admin_url.app_error", nil, "", http.StatusBadRequest) + } else if len(*ws.GatewayAdminSecret) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_gateway_admin_secret.app_error", nil, "", http.StatusBadRequest) + } else if len(*ws.StunURI) != 0 && !IsValidTurnOrStunServer(*ws.StunURI) { + return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_stun_uri.app_error", nil, "", http.StatusBadRequest) + } else if len(*ws.TurnURI) != 0 { + if !IsValidTurnOrStunServer(*ws.TurnURI) { + return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_turn_uri.app_error", nil, "", http.StatusBadRequest) + } + if len(*ws.TurnUsername) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_turn_username.app_error", nil, "", http.StatusBadRequest) + } else if len(*ws.TurnSharedKey) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_turn_shared_key.app_error", nil, "", http.StatusBadRequest) + } + } + } + + return nil +} + +func (ss *ServiceSettings) isValid() *AppError { + if !(*ss.ConnectionSecurity == CONN_SECURITY_NONE || *ss.ConnectionSecurity == CONN_SECURITY_TLS) { + return NewAppError("Config.IsValid", "model.config.is_valid.webserver_security.app_error", nil, "", http.StatusBadRequest) + } + + if *ss.ReadTimeout <= 0 { + 
return NewAppError("Config.IsValid", "model.config.is_valid.read_timeout.app_error", nil, "", http.StatusBadRequest) + } + + if *ss.WriteTimeout <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.write_timeout.app_error", nil, "", http.StatusBadRequest) + } + + if *ss.TimeBetweenUserTypingUpdatesMilliseconds < 1000 { + return NewAppError("Config.IsValid", "model.config.is_valid.time_between_user_typing.app_error", nil, "", http.StatusBadRequest) + } + + if *ss.MaximumLoginAttempts <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.login_attempts.app_error", nil, "", http.StatusBadRequest) + } + + if len(*ss.SiteURL) != 0 { + if _, err := url.ParseRequestURI(*ss.SiteURL); err != nil { + return NewAppError("Config.IsValid", "model.config.is_valid.site_url.app_error", nil, "", http.StatusBadRequest) + } + } + + if len(*ss.ListenAddress) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.listen_address.app_error", nil, "", http.StatusBadRequest) + } + + if *ss.ExperimentalGroupUnreadChannels != GROUP_UNREAD_CHANNELS_DISABLED && + *ss.ExperimentalGroupUnreadChannels != GROUP_UNREAD_CHANNELS_DEFAULT_ON && + *ss.ExperimentalGroupUnreadChannels != GROUP_UNREAD_CHANNELS_DEFAULT_OFF { + return NewAppError("Config.IsValid", "model.config.is_valid.group_unread_channels.app_error", nil, "", http.StatusBadRequest) + } + + switch *ss.ImageProxyType { + case "", "willnorris/imageproxy": + case "atmos/camo": + if *ss.ImageProxyOptions == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.atmos_camo_image_proxy_options.app_error", nil, "", http.StatusBadRequest) + } + default: + return NewAppError("Config.IsValid", "model.config.is_valid.image_proxy_type.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (ess *ElasticsearchSettings) isValid() *AppError { + if *ess.EnableIndexing { + if len(*ess.ConnectionUrl) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.connection_url.app_error", nil, "", http.StatusBadRequest) + } + } + + if *ess.EnableSearching && !*ess.EnableIndexing { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.enable_searching.app_error", nil, "", http.StatusBadRequest) + } + + if *ess.AggregatePostsAfterDays < 1 { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.aggregate_posts_after_days.app_error", nil, "", http.StatusBadRequest) + } + + if _, err := time.Parse("15:04", *ess.PostsAggregatorJobStartTime); err != nil { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.posts_aggregator_job_start_time.app_error", nil, err.Error(), http.StatusBadRequest) + } + + if *ess.LiveIndexingBatchSize < 1 { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.live_indexing_batch_size.app_error", nil, "", http.StatusBadRequest) + } + + if *ess.BulkIndexingTimeWindowSeconds < 1 { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.bulk_indexing_time_window_seconds.app_error", nil, "", http.StatusBadRequest) + } + + if *ess.RequestTimeoutSeconds < 1 { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.request_timeout_seconds.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (drs *DataRetentionSettings) isValid() *AppError { + if *drs.MessageRetentionDays <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.data_retention.message_retention_days_too_low.app_error", nil, "", 
http.StatusBadRequest) + } + + if *drs.FileRetentionDays <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.data_retention.file_retention_days_too_low.app_error", nil, "", http.StatusBadRequest) + } + + if _, err := time.Parse("15:04", *drs.DeletionJobStartTime); err != nil { + return NewAppError("Config.IsValid", "model.config.is_valid.data_retention.deletion_job_start_time.app_error", nil, err.Error(), http.StatusBadRequest) + } + + return nil +} + +func (ls *LocalizationSettings) isValid() *AppError { + if len(*ls.AvailableLocales) > 0 { + if !strings.Contains(*ls.AvailableLocales, *ls.DefaultClientLocale) { + return NewAppError("Config.IsValid", "model.config.is_valid.localization.available_locales.app_error", nil, "", http.StatusBadRequest) + } + } + + return nil +} + +func (mes *MessageExportSettings) isValid(fs FileSettings) *AppError { + if mes.EnableExport == nil { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.enable.app_error", nil, "", http.StatusBadRequest) + } + if *mes.EnableExport { + if mes.ExportFromTimestamp == nil || *mes.ExportFromTimestamp < 0 || *mes.ExportFromTimestamp > GetMillis() { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.export_from.app_error", nil, "", http.StatusBadRequest) + } else if mes.DailyRunTime == nil { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.daily_runtime.app_error", nil, "", http.StatusBadRequest) + } else if _, err := time.Parse("15:04", *mes.DailyRunTime); err != nil { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.daily_runtime.app_error", nil, err.Error(), http.StatusBadRequest) + } else if mes.BatchSize == nil || *mes.BatchSize < 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.batch_size.app_error", nil, "", http.StatusBadRequest) + } else if mes.ExportFormat == nil || (*mes.ExportFormat != COMPLIANCE_EXPORT_TYPE_ACTIANCE && *mes.ExportFormat != COMPLIANCE_EXPORT_TYPE_GLOBALRELAY) { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.export_type.app_error", nil, "", http.StatusBadRequest) + } + + if *mes.ExportFormat == COMPLIANCE_EXPORT_TYPE_GLOBALRELAY { + // validating email addresses is hard - just make sure it contains an '@' sign + // see https://stackoverflow.com/questions/201323/using-a-regular-expression-to-validate-an-email-address + if mes.GlobalRelayEmailAddress == nil || !strings.Contains(*mes.GlobalRelayEmailAddress, "@") { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.global_relay_email_address.app_error", nil, "", http.StatusBadRequest) + } + } + } + return nil +} + +func (o *Config) GetSanitizeOptions() map[string]bool { + options := map[string]bool{} + options["fullname"] = o.PrivacySettings.ShowFullName + options["email"] = o.PrivacySettings.ShowEmailAddress + + return options +} + +func (o *Config) Sanitize() { + if o.LdapSettings.BindPassword != nil && len(*o.LdapSettings.BindPassword) > 0 { + *o.LdapSettings.BindPassword = FAKE_SETTING + } + + *o.FileSettings.PublicLinkSalt = FAKE_SETTING + if len(o.FileSettings.AmazonS3SecretAccessKey) > 0 { + o.FileSettings.AmazonS3SecretAccessKey = FAKE_SETTING + } + + o.EmailSettings.InviteSalt = FAKE_SETTING + if len(o.EmailSettings.SMTPPassword) > 0 { + o.EmailSettings.SMTPPassword = FAKE_SETTING + } + + if len(o.GitLabSettings.Secret) > 0 { + o.GitLabSettings.Secret = FAKE_SETTING + } + + *o.SqlSettings.DataSource = FAKE_SETTING + 
o.SqlSettings.AtRestEncryptKey = FAKE_SETTING + + for i := range o.SqlSettings.DataSourceReplicas { + o.SqlSettings.DataSourceReplicas[i] = FAKE_SETTING + } + + for i := range o.SqlSettings.DataSourceSearchReplicas { + o.SqlSettings.DataSourceSearchReplicas[i] = FAKE_SETTING + } + + *o.ElasticsearchSettings.Password = FAKE_SETTING +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/data_retention_policy.go b/vendor/github.com/mattermost/mattermost-server/model/data_retention_policy.go new file mode 100644 index 00000000..dbb13374 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/data_retention_policy.go @@ -0,0 +1,27 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type DataRetentionPolicy struct { + MessageDeletionEnabled bool `json:"message_deletion_enabled"` + FileDeletionEnabled bool `json:"file_deletion_enabled"` + MessageRetentionCutoff int64 `json:"message_retention_cutoff"` + FileRetentionCutoff int64 `json:"file_retention_cutoff"` +} + +func (me *DataRetentionPolicy) ToJson() string { + b, _ := json.Marshal(me) + return string(b) +} + +func DataRetentionPolicyFromJson(data io.Reader) *DataRetentionPolicy { + var me *DataRetentionPolicy + json.NewDecoder(data).Decode(&me) + return me +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/emoji.go b/vendor/github.com/mattermost/mattermost-server/model/emoji.go new file mode 100644 index 00000000..a1703abb --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/emoji.go @@ -0,0 +1,83 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
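A brief aside on the config.go block above: the intended call order is SetDefaults, then IsValid, then Sanitize before the config is handed to a client. The sketch below is illustrative only, not part of the vendored diff, and assumes the package is importable at its vendored path.

package main

import (
    "fmt"

    "github.com/mattermost/mattermost-server/model"
)

func main() {
    var cfg model.Config
    cfg.SetDefaults() // fills every nil settings pointer with its default value

    if appErr := cfg.IsValid(); appErr != nil {
        // Handle any validation failure before using the config.
        fmt.Println("config not valid:", appErr)
        return
    }

    cfg.Sanitize() // masks passwords, salts and data sources before exposing the config
    fmt.Println(len(cfg.ToJson()), "bytes of sanitized JSON")
}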
+ +package model + +import ( + "encoding/json" + "io" + "net/http" +) + +const ( + EMOJI_NAME_MAX_LENGTH = 64 + EMOJI_SORT_BY_NAME = "name" +) + +type Emoji struct { + Id string `json:"id"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + CreatorId string `json:"creator_id"` + Name string `json:"name"` +} + +func (emoji *Emoji) IsValid() *AppError { + if len(emoji.Id) != 26 { + return NewAppError("Emoji.IsValid", "model.emoji.id.app_error", nil, "", http.StatusBadRequest) + } + + if emoji.CreateAt == 0 { + return NewAppError("Emoji.IsValid", "model.emoji.create_at.app_error", nil, "id="+emoji.Id, http.StatusBadRequest) + } + + if emoji.UpdateAt == 0 { + return NewAppError("Emoji.IsValid", "model.emoji.update_at.app_error", nil, "id="+emoji.Id, http.StatusBadRequest) + } + + if len(emoji.CreatorId) != 26 { + return NewAppError("Emoji.IsValid", "model.emoji.user_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(emoji.Name) == 0 || len(emoji.Name) > EMOJI_NAME_MAX_LENGTH || !IsValidAlphaNumHyphenUnderscore(emoji.Name, false) { + return NewAppError("Emoji.IsValid", "model.emoji.name.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (emoji *Emoji) PreSave() { + if emoji.Id == "" { + emoji.Id = NewId() + } + + emoji.CreateAt = GetMillis() + emoji.UpdateAt = emoji.CreateAt +} + +func (emoji *Emoji) PreUpdate() { + emoji.UpdateAt = GetMillis() +} + +func (emoji *Emoji) ToJson() string { + b, _ := json.Marshal(emoji) + return string(b) +} + +func EmojiFromJson(data io.Reader) *Emoji { + var emoji *Emoji + json.NewDecoder(data).Decode(&emoji) + return emoji +} + +func EmojiListToJson(emojiList []*Emoji) string { + b, _ := json.Marshal(emojiList) + return string(b) +} + +func EmojiListFromJson(data io.Reader) []*Emoji { + var emojiList []*Emoji + json.NewDecoder(data).Decode(&emojiList) + return emojiList +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/emoji_search.go b/vendor/github.com/mattermost/mattermost-server/model/emoji_search.go new file mode 100644 index 00000000..3a768a57 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/emoji_search.go @@ -0,0 +1,25 @@ +// Copyright (c) 2018-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type EmojiSearch struct { + Term string `json:"term"` + PrefixOnly bool `json:"prefix_only"` +} + +func (es *EmojiSearch) ToJson() string { + b, _ := json.Marshal(es) + return string(b) +} + +func EmojiSearchFromJson(data io.Reader) *EmojiSearch { + var es *EmojiSearch + json.NewDecoder(data).Decode(&es) + return es +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/file.go b/vendor/github.com/mattermost/mattermost-server/model/file.go new file mode 100644 index 00000000..c7ffbf0b --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/file.go @@ -0,0 +1,34 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
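Similarly, a short illustrative sketch of the Emoji helpers defined just above: PreSave assigns the id and timestamps, IsValid enforces the 26-character ids and the name rules. The emoji name and creator id are invented.

package main

import (
    "fmt"

    "github.com/mattermost/mattermost-server/model"
)

func main() {
    emoji := &model.Emoji{
        CreatorId: model.NewId(), // any 26-character id passes validation
        Name:      "party_parrot",
    }

    emoji.PreSave() // sets Id, CreateAt and UpdateAt
    if appErr := emoji.IsValid(); appErr != nil {
        fmt.Println("rejected:", appErr)
        return
    }
    fmt.Println(emoji.ToJson())
}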
+ +package model + +import ( + "encoding/json" + "io" +) + +const ( + MaxImageSize = 6048 * 4032 // 24 megapixels, roughly 36MB as a raw image +) + +var ( + IMAGE_EXTENSIONS = [5]string{".jpg", ".jpeg", ".gif", ".bmp", ".png"} + IMAGE_MIME_TYPES = map[string]string{".jpg": "image/jpeg", ".jpeg": "image/jpeg", ".gif": "image/gif", ".bmp": "image/bmp", ".png": "image/png", ".tiff": "image/tiff"} +) + +type FileUploadResponse struct { + FileInfos []*FileInfo `json:"file_infos"` + ClientIds []string `json:"client_ids"` +} + +func FileUploadResponseFromJson(data io.Reader) *FileUploadResponse { + var o *FileUploadResponse + json.NewDecoder(data).Decode(&o) + return o +} + +func (o *FileUploadResponse) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/file_info.go b/vendor/github.com/mattermost/mattermost-server/model/file_info.go new file mode 100644 index 00000000..e0bbfcfc --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/file_info.go @@ -0,0 +1,170 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "bytes" + "encoding/json" + "image" + "image/gif" + "io" + "mime" + "net/http" + "path/filepath" + "strings" +) + +type FileInfo struct { + Id string `json:"id"` + CreatorId string `json:"user_id"` + PostId string `json:"post_id,omitempty"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + Path string `json:"-"` // not sent back to the client + ThumbnailPath string `json:"-"` // not sent back to the client + PreviewPath string `json:"-"` // not sent back to the client + Name string `json:"name"` + Extension string `json:"extension"` + Size int64 `json:"size"` + MimeType string `json:"mime_type"` + Width int `json:"width,omitempty"` + Height int `json:"height,omitempty"` + HasPreviewImage bool `json:"has_preview_image,omitempty"` +} + +func (info *FileInfo) ToJson() string { + b, _ := json.Marshal(info) + return string(b) +} + +func FileInfoFromJson(data io.Reader) *FileInfo { + decoder := json.NewDecoder(data) + + var info FileInfo + if err := decoder.Decode(&info); err != nil { + return nil + } else { + return &info + } +} + +func FileInfosToJson(infos []*FileInfo) string { + b, _ := json.Marshal(infos) + return string(b) +} + +func FileInfosFromJson(data io.Reader) []*FileInfo { + decoder := json.NewDecoder(data) + + var infos []*FileInfo + if err := decoder.Decode(&infos); err != nil { + return nil + } else { + return infos + } +} + +func (o *FileInfo) PreSave() { + if o.Id == "" { + o.Id = NewId() + } + + if o.CreateAt == 0 { + o.CreateAt = GetMillis() + } + + if o.UpdateAt < o.CreateAt { + o.UpdateAt = o.CreateAt + } +} + +func (o *FileInfo) IsValid() *AppError { + if len(o.Id) != 26 { + return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.CreatorId) != 26 { + return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.user_id.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if len(o.PostId) != 0 && len(o.PostId) != 26 { + return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.post_id.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if o.CreateAt == 0 { + return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if o.UpdateAt == 0 { + return 
NewAppError("FileInfo.IsValid", "model.file_info.is_valid.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if o.Path == "" { + return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.path.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + return nil +} + +func (o *FileInfo) IsImage() bool { + return strings.HasPrefix(o.MimeType, "image") +} + +func GetInfoForBytes(name string, data []byte) (*FileInfo, *AppError) { + info := &FileInfo{ + Name: name, + Size: int64(len(data)), + } + var err *AppError + + extension := strings.ToLower(filepath.Ext(name)) + info.MimeType = mime.TypeByExtension(extension) + + if extension != "" && extension[0] == '.' { + // The client expects a file extension without the leading period + info.Extension = extension[1:] + } else { + info.Extension = extension + } + + if info.IsImage() { + // Only set the width and height if it's actually an image that we can understand + if config, _, err := image.DecodeConfig(bytes.NewReader(data)); err == nil { + info.Width = config.Width + info.Height = config.Height + + if info.MimeType == "image/gif" { + // Just show the gif itself instead of a preview image for animated gifs + if gifConfig, err := gif.DecodeAll(bytes.NewReader(data)); err != nil { + // Still return the rest of the info even though it doesn't appear to be an actual gif + info.HasPreviewImage = true + err = NewAppError("GetInfoForBytes", "model.file_info.get.gif.app_error", nil, "name="+name, http.StatusBadRequest) + } else { + info.HasPreviewImage = len(gifConfig.Image) == 1 + } + } else { + info.HasPreviewImage = true + } + } + } + + return info, err +} + +func GetEtagForFileInfos(infos []*FileInfo) string { + if len(infos) == 0 { + return Etag() + } + + var maxUpdateAt int64 + + for _, info := range infos { + if info.UpdateAt > maxUpdateAt { + maxUpdateAt = info.UpdateAt + } + } + + return Etag(infos[0].PostId, maxUpdateAt) +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/gitlab.go b/vendor/github.com/mattermost/mattermost-server/model/gitlab.go new file mode 100644 index 00000000..8777614c --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/gitlab.go @@ -0,0 +1,8 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +const ( + USER_AUTH_SERVICE_GITLAB = "gitlab" +) diff --git a/vendor/github.com/mattermost/mattermost-server/model/gitlab/gitlab.go b/vendor/github.com/mattermost/mattermost-server/model/gitlab/gitlab.go new file mode 100644 index 00000000..7e0cb10a --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/gitlab/gitlab.go @@ -0,0 +1,114 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package oauthgitlab + +import ( + "encoding/json" + "io" + "strconv" + "strings" + + "github.com/mattermost/mattermost-server/einterfaces" + "github.com/mattermost/mattermost-server/model" +) + +type GitLabProvider struct { +} + +type GitLabUser struct { + Id int64 `json:"id"` + Username string `json:"username"` + Login string `json:"login"` + Email string `json:"email"` + Name string `json:"name"` +} + +func init() { + provider := &GitLabProvider{} + einterfaces.RegisterOauthProvider(model.USER_AUTH_SERVICE_GITLAB, provider) +} + +func userFromGitLabUser(glu *GitLabUser) *model.User { + user := &model.User{} + username := glu.Username + if username == "" { + username = glu.Login + } + user.Username = model.CleanUsername(username) + splitName := strings.Split(glu.Name, " ") + if len(splitName) == 2 { + user.FirstName = splitName[0] + user.LastName = splitName[1] + } else if len(splitName) >= 2 { + user.FirstName = splitName[0] + user.LastName = strings.Join(splitName[1:], " ") + } else { + user.FirstName = glu.Name + } + user.Email = glu.Email + userId := strconv.FormatInt(glu.Id, 10) + user.AuthData = &userId + user.AuthService = model.USER_AUTH_SERVICE_GITLAB + + return user +} + +func gitLabUserFromJson(data io.Reader) *GitLabUser { + decoder := json.NewDecoder(data) + var glu GitLabUser + err := decoder.Decode(&glu) + if err == nil { + return &glu + } else { + return nil + } +} + +func (glu *GitLabUser) ToJson() string { + b, err := json.Marshal(glu) + if err != nil { + return "" + } else { + return string(b) + } +} + +func (glu *GitLabUser) IsValid() bool { + if glu.Id == 0 { + return false + } + + if len(glu.Email) == 0 { + return false + } + + return true +} + +func (glu *GitLabUser) getAuthData() string { + return strconv.FormatInt(glu.Id, 10) +} + +func (m *GitLabProvider) GetIdentifier() string { + return model.USER_AUTH_SERVICE_GITLAB +} + +func (m *GitLabProvider) GetUserFromJson(data io.Reader) *model.User { + glu := gitLabUserFromJson(data) + if glu.IsValid() { + return userFromGitLabUser(glu) + } + + return &model.User{} +} + +func (m *GitLabProvider) GetAuthDataFromJson(data io.Reader) string { + glu := gitLabUserFromJson(data) + + if glu.IsValid() { + return glu.getAuthData() + } + + return "" +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/incoming_webhook.go b/vendor/github.com/mattermost/mattermost-server/model/incoming_webhook.go new file mode 100644 index 00000000..b38cfeec --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/incoming_webhook.go @@ -0,0 +1,206 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
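The GitLab provider above converts an upstream user payload into a Mattermost user. A sketch with an invented payload follows; note that the package is named oauthgitlab even though it lives under model/gitlab, so a named import is used.

package main

import (
    "fmt"
    "strings"

    oauthgitlab "github.com/mattermost/mattermost-server/model/gitlab"
)

func main() {
    provider := &oauthgitlab.GitLabProvider{}

    // A made-up GitLab user payload.
    body := strings.NewReader(`{"id": 42, "username": "jdoe", "email": "jdoe@example.com", "name": "Jane Doe"}`)

    user := provider.GetUserFromJson(body)
    fmt.Println(user.Username, user.Email, user.FirstName, user.LastName, *user.AuthData)
}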
+ +package model + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "regexp" +) + +const ( + DEFAULT_WEBHOOK_USERNAME = "webhook" +) + +type IncomingWebhook struct { + Id string `json:"id"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + UserId string `json:"user_id"` + ChannelId string `json:"channel_id"` + TeamId string `json:"team_id"` + DisplayName string `json:"display_name"` + Description string `json:"description"` + Username string `json:"username"` + IconURL string `json:"icon_url"` +} + +type IncomingWebhookRequest struct { + Text string `json:"text"` + Username string `json:"username"` + IconURL string `json:"icon_url"` + ChannelName string `json:"channel"` + Props StringInterface `json:"props"` + Attachments []*SlackAttachment `json:"attachments"` + Type string `json:"type"` +} + +func (o *IncomingWebhook) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func IncomingWebhookFromJson(data io.Reader) *IncomingWebhook { + var o *IncomingWebhook + json.NewDecoder(data).Decode(&o) + return o +} + +func IncomingWebhookListToJson(l []*IncomingWebhook) string { + b, _ := json.Marshal(l) + return string(b) +} + +func IncomingWebhookListFromJson(data io.Reader) []*IncomingWebhook { + var o []*IncomingWebhook + json.NewDecoder(data).Decode(&o) + return o +} + +func (o *IncomingWebhook) IsValid() *AppError { + + if len(o.Id) != 26 { + return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.id.app_error", nil, "", http.StatusBadRequest) + + } + + if o.CreateAt == 0 { + return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if o.UpdateAt == 0 { + return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if len(o.UserId) != 26 { + return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.user_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.ChannelId) != 26 { + return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.channel_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.TeamId) != 26 { + return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.team_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.DisplayName) > 64 { + return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.display_name.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.Description) > 128 { + return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.description.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.Username) > 64 { + return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.username.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.IconURL) > 1024 { + return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.icon_url.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (o *IncomingWebhook) PreSave() { + if o.Id == "" { + o.Id = NewId() + } + + o.CreateAt = GetMillis() + o.UpdateAt = o.CreateAt +} + +func (o *IncomingWebhook) PreUpdate() { + o.UpdateAt = GetMillis() +} + +// escapeControlCharsFromPayload escapes control chars (\n, \t) from a byte slice. +// Context: +// JSON strings are not supposed to contain control characters such as \n, \t, +// ... but some incoming webhooks might still send invalid JSON and we want to +// try to handle that. 
An example invalid JSON string from an incoming webhook +// might look like this (strings for both "text" and "fallback" attributes are +// invalid JSON strings because they contain unescaped newlines and tabs): +// `{ +// "text": "this is a test +// that contains a newline and tabs", +// "attachments": [ +// { +// "fallback": "Required plain-text summary of the attachment +// that contains a newline and tabs", +// "color": "#36a64f", +// ... +// "text": "Optional text that appears within the attachment +// that contains a newline and tabs", +// ... +// "thumb_url": "http://example.com/path/to/thumb.png" +// } +// ] +// }` +// This function will search for `"key": "value"` pairs, and escape \n, \t +// from the value. +func escapeControlCharsFromPayload(by []byte) []byte { + // we'll search for `"text": "..."` or `"fallback": "..."`, ... + keys := "text|fallback|pretext|author_name|title|value" + + // the regexp reads like this: + // (?s): this flag let . match \n (default is false) + // "(keys)": we search for the keys defined above + // \s*:\s*: followed by 0..n spaces/tabs, a colon then 0..n spaces/tabs + // ": a double-quote + // (\\"|[^"])*: any number of times the `\"` string or any char but a double-quote + // ": a double-quote + r := `(?s)"(` + keys + `)"\s*:\s*"(\\"|[^"])*"` + re := regexp.MustCompile(r) + + // the function that will escape \n and \t on the regexp matches + repl := func(b []byte) []byte { + if bytes.Contains(b, []byte("\n")) { + b = bytes.Replace(b, []byte("\n"), []byte("\\n"), -1) + } + if bytes.Contains(b, []byte("\t")) { + b = bytes.Replace(b, []byte("\t"), []byte("\\t"), -1) + } + + return b + } + + return re.ReplaceAllFunc(by, repl) +} + +func decodeIncomingWebhookRequest(by []byte) (*IncomingWebhookRequest, error) { + decoder := json.NewDecoder(bytes.NewReader(by)) + var o IncomingWebhookRequest + err := decoder.Decode(&o) + if err == nil { + return &o, nil + } else { + return nil, err + } +} + +func IncomingWebhookRequestFromJson(data io.Reader) (*IncomingWebhookRequest, *AppError) { + buf := new(bytes.Buffer) + buf.ReadFrom(data) + by := buf.Bytes() + + // Try to decode the JSON data. Only if it fails, try to escape control + // characters from the strings contained in the JSON data. + o, err := decodeIncomingWebhookRequest(by) + if err != nil { + o, err = decodeIncomingWebhookRequest(escapeControlCharsFromPayload(by)) + if err != nil { + return nil, NewAppError("IncomingWebhookRequestFromJson", "Unable to parse incoming data", nil, err.Error(), http.StatusBadRequest) + } + } + + o.Attachments = StringifySlackFieldValue(o.Attachments) + + return o, nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/initial_load.go b/vendor/github.com/mattermost/mattermost-server/model/initial_load.go new file mode 100644 index 00000000..3be68044 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/initial_load.go @@ -0,0 +1,30 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
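The escaping behaviour described in the comment above is easiest to see end to end. In the sketch below the payload is invented and the "text" value deliberately contains a raw newline, so the first decode fails and the escaped retry succeeds.

package main

import (
    "fmt"
    "strings"

    "github.com/mattermost/mattermost-server/model"
)

func main() {
    // Not valid JSON as-is: the "text" value spans two lines.
    payload := `{"text": "first line
second line", "username": "ci-bot"}`

    req, appErr := model.IncomingWebhookRequestFromJson(strings.NewReader(payload))
    if appErr != nil {
        fmt.Println("parse failed:", appErr)
        return
    }
    fmt.Printf("%q posted by %s\n", req.Text, req.Username)
}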
+ +package model + +import ( + "encoding/json" + "io" +) + +type InitialLoad struct { + User *User `json:"user"` + TeamMembers []*TeamMember `json:"team_members"` + Teams []*Team `json:"teams"` + Preferences Preferences `json:"preferences"` + ClientCfg map[string]string `json:"client_cfg"` + LicenseCfg map[string]string `json:"license_cfg"` + NoAccounts bool `json:"no_accounts"` +} + +func (me *InitialLoad) ToJson() string { + b, _ := json.Marshal(me) + return string(b) +} + +func InitialLoadFromJson(data io.Reader) *InitialLoad { + var o *InitialLoad + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/job.go b/vendor/github.com/mattermost/mattermost-server/model/job.go new file mode 100644 index 00000000..e10ed1f5 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/job.go @@ -0,0 +1,118 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" + "net/http" + "time" +) + +const ( + JOB_TYPE_DATA_RETENTION = "data_retention" + JOB_TYPE_MESSAGE_EXPORT = "message_export" + JOB_TYPE_ELASTICSEARCH_POST_INDEXING = "elasticsearch_post_indexing" + JOB_TYPE_ELASTICSEARCH_POST_AGGREGATION = "elasticsearch_post_aggregation" + JOB_TYPE_LDAP_SYNC = "ldap_sync" + + JOB_STATUS_PENDING = "pending" + JOB_STATUS_IN_PROGRESS = "in_progress" + JOB_STATUS_SUCCESS = "success" + JOB_STATUS_ERROR = "error" + JOB_STATUS_CANCEL_REQUESTED = "cancel_requested" + JOB_STATUS_CANCELED = "canceled" +) + +type Job struct { + Id string `json:"id"` + Type string `json:"type"` + Priority int64 `json:"priority"` + CreateAt int64 `json:"create_at"` + StartAt int64 `json:"start_at"` + LastActivityAt int64 `json:"last_activity_at"` + Status string `json:"status"` + Progress int64 `json:"progress"` + Data map[string]string `json:"data"` +} + +func (j *Job) IsValid() *AppError { + if len(j.Id) != 26 { + return NewAppError("Job.IsValid", "model.job.is_valid.id.app_error", nil, "id="+j.Id, http.StatusBadRequest) + } + + if j.CreateAt == 0 { + return NewAppError("Job.IsValid", "model.job.is_valid.create_at.app_error", nil, "id="+j.Id, http.StatusBadRequest) + } + + switch j.Type { + case JOB_TYPE_DATA_RETENTION: + case JOB_TYPE_ELASTICSEARCH_POST_INDEXING: + case JOB_TYPE_ELASTICSEARCH_POST_AGGREGATION: + case JOB_TYPE_LDAP_SYNC: + case JOB_TYPE_MESSAGE_EXPORT: + default: + return NewAppError("Job.IsValid", "model.job.is_valid.type.app_error", nil, "id="+j.Id, http.StatusBadRequest) + } + + switch j.Status { + case JOB_STATUS_PENDING: + case JOB_STATUS_IN_PROGRESS: + case JOB_STATUS_SUCCESS: + case JOB_STATUS_ERROR: + case JOB_STATUS_CANCEL_REQUESTED: + case JOB_STATUS_CANCELED: + default: + return NewAppError("Job.IsValid", "model.job.is_valid.status.app_error", nil, "id="+j.Id, http.StatusBadRequest) + } + + return nil +} + +func (js *Job) ToJson() string { + b, _ := json.Marshal(js) + return string(b) +} + +func JobFromJson(data io.Reader) *Job { + var job Job + if err := json.NewDecoder(data).Decode(&job); err == nil { + return &job + } else { + return nil + } +} + +func JobsToJson(jobs []*Job) string { + b, _ := json.Marshal(jobs) + return string(b) +} + +func JobsFromJson(data io.Reader) []*Job { + var jobs []*Job + if err := json.NewDecoder(data).Decode(&jobs); err == nil { + return jobs + } else { + return nil + } +} + +func (js *Job) DataToJson() string { + b, _ := json.Marshal(js.Data) + return string(b) +} + +type Worker 
interface { + Run() + Stop() + JobChannel() chan<- Job +} + +type Scheduler interface { + Name() string + JobType() string + Enabled(cfg *Config) bool + NextScheduleTime(cfg *Config, now time.Time, pendingJobs bool, lastSuccessfulJob *Job) *time.Time + ScheduleJob(cfg *Config, pendingJobs bool, lastSuccessfulJob *Job) (*Job, *AppError) +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/ldap.go b/vendor/github.com/mattermost/mattermost-server/model/ldap.go new file mode 100644 index 00000000..1453a4ad --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/ldap.go @@ -0,0 +1,9 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +const ( + USER_AUTH_SERVICE_LDAP = "ldap" + LDAP_SYNC_TASK_NAME = "LDAP Syncronization" +) diff --git a/vendor/github.com/mattermost/mattermost-server/model/license.go b/vendor/github.com/mattermost/mattermost-server/model/license.go new file mode 100644 index 00000000..942a18d5 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/license.go @@ -0,0 +1,219 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" + "net/http" +) + +const ( + EXPIRED_LICENSE_ERROR = "api.license.add_license.expired.app_error" + INVALID_LICENSE_ERROR = "api.license.add_license.invalid.app_error" +) + +type LicenseRecord struct { + Id string `json:"id"` + CreateAt int64 `json:"create_at"` + Bytes string `json:"-"` +} + +type License struct { + Id string `json:"id"` + IssuedAt int64 `json:"issued_at"` + StartsAt int64 `json:"starts_at"` + ExpiresAt int64 `json:"expires_at"` + Customer *Customer `json:"customer"` + Features *Features `json:"features"` +} + +type Customer struct { + Id string `json:"id"` + Name string `json:"name"` + Email string `json:"email"` + Company string `json:"company"` + PhoneNumber string `json:"phone_number"` +} + +type Features struct { + Users *int `json:"users"` + LDAP *bool `json:"ldap"` + MFA *bool `json:"mfa"` + GoogleOAuth *bool `json:"google_oauth"` + Office365OAuth *bool `json:"office365_oauth"` + Compliance *bool `json:"compliance"` + Cluster *bool `json:"cluster"` + Metrics *bool `json:"metrics"` + CustomBrand *bool `json:"custom_brand"` + MHPNS *bool `json:"mhpns"` + SAML *bool `json:"saml"` + PasswordRequirements *bool `json:"password_requirements"` + Elasticsearch *bool `json:"elastic_search"` + Announcement *bool `json:"announcement"` + ThemeManagement *bool `json:"theme_management"` + EmailNotificationContents *bool `json:"email_notification_contents"` + DataRetention *bool `json:"data_retention"` + MessageExport *bool `json:"message_export"` + + // after we enabled more features for webrtc we'll need to control them with this + FutureFeatures *bool `json:"future_features"` +} + +func (f *Features) ToMap() map[string]interface{} { + return map[string]interface{}{ + "ldap": *f.LDAP, + "mfa": *f.MFA, + "google": *f.GoogleOAuth, + "office365": *f.Office365OAuth, + "compliance": *f.Compliance, + "cluster": *f.Cluster, + "metrics": *f.Metrics, + "custom_brand": *f.CustomBrand, + "mhpns": *f.MHPNS, + "saml": *f.SAML, + "password": *f.PasswordRequirements, + "elastic_search": *f.Elasticsearch, + "email_notification_contents": *f.EmailNotificationContents, + "data_retention": *f.DataRetention, + "message_export": *f.MessageExport, + "future": *f.FutureFeatures, + } +} + +func (f *Features) SetDefaults() { 
+ if f.FutureFeatures == nil { + f.FutureFeatures = NewBool(true) + } + + if f.Users == nil { + f.Users = NewInt(0) + } + + if f.LDAP == nil { + f.LDAP = NewBool(*f.FutureFeatures) + } + + if f.MFA == nil { + f.MFA = NewBool(*f.FutureFeatures) + } + + if f.GoogleOAuth == nil { + f.GoogleOAuth = NewBool(*f.FutureFeatures) + } + + if f.Office365OAuth == nil { + f.Office365OAuth = NewBool(*f.FutureFeatures) + } + + if f.Compliance == nil { + f.Compliance = NewBool(*f.FutureFeatures) + } + + if f.Cluster == nil { + f.Cluster = NewBool(*f.FutureFeatures) + } + + if f.Metrics == nil { + f.Metrics = NewBool(*f.FutureFeatures) + } + + if f.CustomBrand == nil { + f.CustomBrand = NewBool(*f.FutureFeatures) + } + + if f.MHPNS == nil { + f.MHPNS = NewBool(*f.FutureFeatures) + } + + if f.SAML == nil { + f.SAML = NewBool(*f.FutureFeatures) + } + + if f.PasswordRequirements == nil { + f.PasswordRequirements = NewBool(*f.FutureFeatures) + } + + if f.Elasticsearch == nil { + f.Elasticsearch = NewBool(*f.FutureFeatures) + } + + if f.Announcement == nil { + f.Announcement = NewBool(true) + } + + if f.ThemeManagement == nil { + f.ThemeManagement = NewBool(true) + } + + if f.EmailNotificationContents == nil { + f.EmailNotificationContents = NewBool(*f.FutureFeatures) + } + + if f.DataRetention == nil { + f.DataRetention = NewBool(*f.FutureFeatures) + } + + if f.MessageExport == nil { + f.MessageExport = NewBool(*f.FutureFeatures) + } +} + +func (l *License) IsExpired() bool { + return l.ExpiresAt < GetMillis() +} + +func (l *License) IsStarted() bool { + return l.StartsAt < GetMillis() +} + +func (l *License) ToJson() string { + b, _ := json.Marshal(l) + return string(b) +} + +// NewTestLicense returns a license that expires in the future and has the given features. +func NewTestLicense(features ...string) *License { + ret := &License{ + ExpiresAt: GetMillis() + 90*24*60*60*1000, + Customer: &Customer{}, + Features: &Features{}, + } + ret.Features.SetDefaults() + + featureMap := map[string]bool{} + for _, feature := range features { + featureMap[feature] = true + } + featureJson, _ := json.Marshal(featureMap) + json.Unmarshal(featureJson, &ret.Features) + + return ret +} + +func LicenseFromJson(data io.Reader) *License { + var o *License + json.NewDecoder(data).Decode(&o) + return o +} + +func (lr *LicenseRecord) IsValid() *AppError { + if len(lr.Id) != 26 { + return NewAppError("LicenseRecord.IsValid", "model.license_record.is_valid.id.app_error", nil, "", http.StatusBadRequest) + } + + if lr.CreateAt == 0 { + return NewAppError("LicenseRecord.IsValid", "model.license_record.is_valid.create_at.app_error", nil, "", http.StatusBadRequest) + } + + if len(lr.Bytes) == 0 || len(lr.Bytes) > 10000 { + return NewAppError("LicenseRecord.IsValid", "model.license_record.is_valid.create_at.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (lr *LicenseRecord) PreSave() { + lr.CreateAt = GetMillis() +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/manifest.go b/vendor/github.com/mattermost/mattermost-server/model/manifest.go new file mode 100644 index 00000000..5ba4854b --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/manifest.go @@ -0,0 +1,228 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
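A sketch of the license helpers above: NewTestLicense applies the Features defaults and then sets the named feature keys (the JSON tags on the Features struct) to true. Illustrative only, not vendored code.

package main

import (
    "fmt"

    "github.com/mattermost/mattermost-server/model"
)

func main() {
    license := model.NewTestLicense("data_retention", "message_export")

    fmt.Println("expired:", license.IsExpired()) // false: expiry is ~90 days out
    fmt.Println("started:", license.IsStarted()) // true: StartsAt is zero
    fmt.Println("data retention:", *license.Features.DataRetention)
    fmt.Println("message export:", *license.Features.MessageExport)
}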
+ +package model + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + + "gopkg.in/yaml.v2" +) + +const ( + PLUGIN_CONFIG_TYPE_TEXT = "text" + PLUGIN_CONFIG_TYPE_BOOL = "bool" + PLUGIN_CONFIG_TYPE_RADIO = "radio" + PLUGIN_CONFIG_TYPE_DROPDOWN = "dropdown" + PLUGIN_CONFIG_TYPE_GENERATED = "generated" + PLUGIN_CONFIG_TYPE_USERNAME = "username" +) + +type PluginOption struct { + // The display name for the option. + DisplayName string `json:"display_name" yaml:"display_name"` + + // The string value for the option. + Value string `json:"value" yaml:"value"` +} + +type PluginSetting struct { + // The key that the setting will be assigned to in the configuration file. + Key string `json:"key" yaml:"key"` + + // The display name for the setting. + DisplayName string `json:"display_name" yaml:"display_name"` + + // The type of the setting. + // + // "bool" will result in a boolean true or false setting. + // + // "dropdown" will result in a string setting that allows the user to select from a list of + // pre-defined options. + // + // "generated" will result in a string setting that is set to a random, cryptographically secure + // string. + // + // "radio" will result in a string setting that allows the user to select from a short selection + // of pre-defined options. + // + // "text" will result in a string setting that can be typed in manually. + // + // "username" will result in a text setting that will autocomplete to a username. + Type string `json:"type" yaml:"type"` + + // The help text to display to the user. + HelpText string `json:"help_text" yaml:"help_text"` + + // The help text to display alongside the "Regenerate" button for settings of the "generated" type. + RegenerateHelpText string `json:"regenerate_help_text,omitempty" yaml:"regenerate_help_text,omitempty"` + + // The placeholder to display for "text", "generated" and "username" types when blank. + Placeholder string `json:"placeholder" yaml:"placeholder"` + + // The default value of the setting. + Default interface{} `json:"default" yaml:"default"` + + // For "radio" or "dropdown" settings, this is the list of pre-defined options that the user can choose + // from. + Options []*PluginOption `json:"options,omitempty" yaml:"options,omitempty"` +} + +type PluginSettingsSchema struct { + // Optional text to display above the settings. + Header string `json:"header" yaml:"header"` + + // Optional text to display below the settings. + Footer string `json:"footer" yaml:"footer"` + + // A list of setting definitions. + Settings []*PluginSetting `json:"settings" yaml:"settings"` +} + +// The plugin manifest defines the metadata required to load and present your plugin. The manifest +// file should be named plugin.json or plugin.yaml and placed in the top of your +// plugin bundle. +// +// Example plugin.yaml: +// +// id: com.mycompany.myplugin +// name: My Plugin +// description: This is my plugin. It does stuff. +// backend: +// executable: myplugin +// settings_schema: +// settings: +// - key: enable_extra_thing +// type: bool +// display_name: Enable Extra Thing +// help_text: When true, an extra thing will be enabled! +// default: false +type Manifest struct { + // The id is a globally unique identifier that represents your plugin. Ids are limited + // to 190 characters. Reverse-DNS notation using a name you control is a good option. + // For example, "com.mycompany.myplugin". + Id string `json:"id" yaml:"id"` + + // The name to be displayed for the plugin. 
+ Name string `json:"name,omitempty" yaml:"name,omitempty"` + + // A description of what your plugin is and does. + Description string `json:"description,omitempty" yaml:"description,omitempty"` + + // A version number for your plugin. Semantic versioning is recommended: http://semver.org + Version string `json:"version" yaml:"version"` + + // If your plugin extends the server, you'll need define backend. + Backend *ManifestBackend `json:"backend,omitempty" yaml:"backend,omitempty"` + + // If your plugin extends the web app, you'll need to define webapp. + Webapp *ManifestWebapp `json:"webapp,omitempty" yaml:"webapp,omitempty"` + + // To allow administrators to configure your plugin via the Mattermost system console, you can + // provide your settings schema. + SettingsSchema *PluginSettingsSchema `json:"settings_schema,omitempty" yaml:"settings_schema,omitempty"` +} + +type ManifestBackend struct { + // The path to your executable binary. This should be relative to the root of your bundle and the + // location of the manifest file. + // + // On Windows, this file must have a ".exe" extension. + Executable string `json:"executable" yaml:"executable"` +} + +type ManifestWebapp struct { + // The path to your webapp bundle. This should be relative to the root of your bundle and the + // location of the manifest file. + BundlePath string `json:"bundle_path" yaml:"bundle_path"` +} + +func (m *Manifest) ToJson() string { + b, _ := json.Marshal(m) + return string(b) +} + +func ManifestListToJson(m []*Manifest) string { + b, _ := json.Marshal(m) + return string(b) +} + +func ManifestFromJson(data io.Reader) *Manifest { + var m *Manifest + json.NewDecoder(data).Decode(&m) + return m +} + +func ManifestListFromJson(data io.Reader) []*Manifest { + var manifests []*Manifest + json.NewDecoder(data).Decode(&manifests) + return manifests +} + +func (m *Manifest) HasClient() bool { + return m.Webapp != nil +} + +func (m *Manifest) ClientManifest() *Manifest { + cm := new(Manifest) + *cm = *m + cm.Name = "" + cm.Description = "" + cm.Backend = nil + return cm +} + +// FindManifest will find and parse the manifest in a given directory. +// +// In all cases other than a does-not-exist error, path is set to the path of the manifest file that was +// found. +// +// Manifests are JSON or YAML files named plugin.json, plugin.yaml, or plugin.yml. +func FindManifest(dir string) (manifest *Manifest, path string, err error) { + for _, name := range []string{"plugin.yml", "plugin.yaml"} { + path = filepath.Join(dir, name) + f, ferr := os.Open(path) + if ferr != nil { + if !os.IsNotExist(ferr) { + err = ferr + return + } + continue + } + b, ioerr := ioutil.ReadAll(f) + f.Close() + if ioerr != nil { + err = ioerr + return + } + var parsed Manifest + err = yaml.Unmarshal(b, &parsed) + if err != nil { + return + } + manifest = &parsed + return + } + + path = filepath.Join(dir, "plugin.json") + f, ferr := os.Open(path) + if ferr != nil { + if os.IsNotExist(ferr) { + path = "" + } + err = ferr + return + } + defer f.Close() + var parsed Manifest + err = json.NewDecoder(f).Decode(&parsed) + if err != nil { + return + } + manifest = &parsed + return +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/message_export.go b/vendor/github.com/mattermost/mattermost-server/model/message_export.go new file mode 100644 index 00000000..22641dee --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/message_export.go @@ -0,0 +1,19 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. 
+// See License.txt for license information. + +package model + +type MessageExport struct { + ChannelId *string + ChannelDisplayName *string + + UserId *string + UserEmail *string + Username *string + + PostId *string + PostCreateAt *int64 + PostMessage *string + PostType *string + PostFileIds StringArray +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/mfa_secret.go b/vendor/github.com/mattermost/mattermost-server/model/mfa_secret.go new file mode 100644 index 00000000..23a903c8 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/mfa_secret.go @@ -0,0 +1,25 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type MfaSecret struct { + Secret string `json:"secret"` + QRCode string `json:"qr_code"` +} + +func (me *MfaSecret) ToJson() string { + b, _ := json.Marshal(me) + return string(b) +} + +func MfaSecretFromJson(data io.Reader) *MfaSecret { + var me *MfaSecret + json.NewDecoder(data).Decode(&me) + return me +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/oauth.go b/vendor/github.com/mattermost/mattermost-server/model/oauth.go new file mode 100644 index 00000000..70e8a3f2 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/oauth.go @@ -0,0 +1,164 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "unicode/utf8" +) + +const ( + OAUTH_ACTION_SIGNUP = "signup" + OAUTH_ACTION_LOGIN = "login" + OAUTH_ACTION_EMAIL_TO_SSO = "email_to_sso" + OAUTH_ACTION_SSO_TO_EMAIL = "sso_to_email" + OAUTH_ACTION_MOBILE = "mobile" +) + +type OAuthApp struct { + Id string `json:"id"` + CreatorId string `json:"creator_id"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + ClientSecret string `json:"client_secret"` + Name string `json:"name"` + Description string `json:"description"` + IconURL string `json:"icon_url"` + CallbackUrls StringArray `json:"callback_urls"` + Homepage string `json:"homepage"` + IsTrusted bool `json:"is_trusted"` +} + +// IsValid validates the app and returns an error if it isn't configured +// correctly. 
+func (a *OAuthApp) IsValid() *AppError { + + if len(a.Id) != 26 { + return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.app_id.app_error", nil, "", http.StatusBadRequest) + } + + if a.CreateAt == 0 { + return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.create_at.app_error", nil, "app_id="+a.Id, http.StatusBadRequest) + } + + if a.UpdateAt == 0 { + return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.update_at.app_error", nil, "app_id="+a.Id, http.StatusBadRequest) + } + + if len(a.CreatorId) != 26 { + return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.creator_id.app_error", nil, "app_id="+a.Id, http.StatusBadRequest) + } + + if len(a.ClientSecret) == 0 || len(a.ClientSecret) > 128 { + return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.client_secret.app_error", nil, "app_id="+a.Id, http.StatusBadRequest) + } + + if len(a.Name) == 0 || len(a.Name) > 64 { + return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.name.app_error", nil, "app_id="+a.Id, http.StatusBadRequest) + } + + if len(a.CallbackUrls) == 0 || len(fmt.Sprintf("%s", a.CallbackUrls)) > 1024 { + return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.callback.app_error", nil, "app_id="+a.Id, http.StatusBadRequest) + } + + for _, callback := range a.CallbackUrls { + if !IsValidHttpUrl(callback) { + return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.callback.app_error", nil, "", http.StatusBadRequest) + } + } + + if len(a.Homepage) == 0 || len(a.Homepage) > 256 || !IsValidHttpUrl(a.Homepage) { + return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.homepage.app_error", nil, "app_id="+a.Id, http.StatusBadRequest) + } + + if utf8.RuneCountInString(a.Description) > 512 { + return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.description.app_error", nil, "app_id="+a.Id, http.StatusBadRequest) + } + + if len(a.IconURL) > 0 { + if len(a.IconURL) > 512 || !IsValidHttpUrl(a.IconURL) { + return NewAppError("OAuthApp.IsValid", "model.oauth.is_valid.icon_url.app_error", nil, "app_id="+a.Id, http.StatusBadRequest) + } + } + + return nil +} + +// PreSave will set the Id and ClientSecret if missing. It will also fill +// in the CreateAt, UpdateAt times. It should be run before saving the app to the db. +func (a *OAuthApp) PreSave() { + if a.Id == "" { + a.Id = NewId() + } + + if a.ClientSecret == "" { + a.ClientSecret = NewId() + } + + a.CreateAt = GetMillis() + a.UpdateAt = a.CreateAt +} + +// PreUpdate should be run before updating the app in the db. 
+func (a *OAuthApp) PreUpdate() { + a.UpdateAt = GetMillis() +} + +// ToJson convert a User to a json string +func (a *OAuthApp) ToJson() string { + b, _ := json.Marshal(a) + return string(b) +} + +// Generate a valid strong etag so the browser can cache the results +func (a *OAuthApp) Etag() string { + return Etag(a.Id, a.UpdateAt) +} + +// Remove any private data from the app object +func (a *OAuthApp) Sanitize() { + a.ClientSecret = "" +} + +func (a *OAuthApp) IsValidRedirectURL(url string) bool { + for _, u := range a.CallbackUrls { + if u == url { + return true + } + } + + return false +} + +// OAuthAppFromJson will decode the input and return a User +func OAuthAppFromJson(data io.Reader) *OAuthApp { + var app *OAuthApp + json.NewDecoder(data).Decode(&app) + return app +} + +func OAuthAppMapToJson(a map[string]*OAuthApp) string { + b, _ := json.Marshal(a) + return string(b) +} + +func OAuthAppMapFromJson(data io.Reader) map[string]*OAuthApp { + var apps map[string]*OAuthApp + json.NewDecoder(data).Decode(&apps) + return apps +} + +func OAuthAppListToJson(l []*OAuthApp) string { + b, _ := json.Marshal(l) + return string(b) +} + +func OAuthAppListFromJson(data io.Reader) []*OAuthApp { + var o []*OAuthApp + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/outgoing_webhook.go b/vendor/github.com/mattermost/mattermost-server/model/outgoing_webhook.go new file mode 100644 index 00000000..b5dbf34d --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/outgoing_webhook.go @@ -0,0 +1,254 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +type OutgoingWebhook struct { + Id string `json:"id"` + Token string `json:"token"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + CreatorId string `json:"creator_id"` + ChannelId string `json:"channel_id"` + TeamId string `json:"team_id"` + TriggerWords StringArray `json:"trigger_words"` + TriggerWhen int `json:"trigger_when"` + CallbackURLs StringArray `json:"callback_urls"` + DisplayName string `json:"display_name"` + Description string `json:"description"` + ContentType string `json:"content_type"` +} + +type OutgoingWebhookPayload struct { + Token string `json:"token"` + TeamId string `json:"team_id"` + TeamDomain string `json:"team_domain"` + ChannelId string `json:"channel_id"` + ChannelName string `json:"channel_name"` + Timestamp int64 `json:"timestamp"` + UserId string `json:"user_id"` + UserName string `json:"user_name"` + PostId string `json:"post_id"` + Text string `json:"text"` + TriggerWord string `json:"trigger_word"` + FileIds string `json:"file_ids"` +} + +type OutgoingWebhookResponse struct { + Text *string `json:"text"` + Username string `json:"username"` + IconURL string `json:"icon_url"` + Props StringInterface `json:"props"` + Attachments []*SlackAttachment `json:"attachments"` + Type string `json:"type"` + ResponseType string `json:"response_type"` +} + +const OUTGOING_HOOK_RESPONSE_TYPE_COMMENT = "comment" + +func (o *OutgoingWebhookPayload) ToJSON() string { + b, _ := json.Marshal(o) + return string(b) +} + +func (o *OutgoingWebhookPayload) ToFormValues() string { + v := url.Values{} + v.Set("token", o.Token) + v.Set("team_id", o.TeamId) + v.Set("team_domain", o.TeamDomain) + v.Set("channel_id", o.ChannelId) + 
v.Set("channel_name", o.ChannelName) + v.Set("timestamp", strconv.FormatInt(o.Timestamp/1000, 10)) + v.Set("user_id", o.UserId) + v.Set("user_name", o.UserName) + v.Set("post_id", o.PostId) + v.Set("text", o.Text) + v.Set("trigger_word", o.TriggerWord) + v.Set("file_ids", o.FileIds) + + return v.Encode() +} + +func (o *OutgoingWebhook) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func OutgoingWebhookFromJson(data io.Reader) *OutgoingWebhook { + var o *OutgoingWebhook + json.NewDecoder(data).Decode(&o) + return o +} + +func OutgoingWebhookListToJson(l []*OutgoingWebhook) string { + b, _ := json.Marshal(l) + return string(b) +} + +func OutgoingWebhookListFromJson(data io.Reader) []*OutgoingWebhook { + var o []*OutgoingWebhook + json.NewDecoder(data).Decode(&o) + return o +} + +func (o *OutgoingWebhookResponse) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func OutgoingWebhookResponseFromJson(data io.Reader) *OutgoingWebhookResponse { + var o *OutgoingWebhookResponse + json.NewDecoder(data).Decode(&o) + return o +} + +func (o *OutgoingWebhook) IsValid() *AppError { + + if len(o.Id) != 26 { + return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.Token) != 26 { + return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.token.app_error", nil, "", http.StatusBadRequest) + } + + if o.CreateAt == 0 { + return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if o.UpdateAt == 0 { + return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if len(o.CreatorId) != 26 { + return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.ChannelId) != 0 && len(o.ChannelId) != 26 { + return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.TeamId) != 26 { + return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.team_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(fmt.Sprintf("%s", o.TriggerWords)) > 1024 { + return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.words.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.TriggerWords) != 0 { + for _, triggerWord := range o.TriggerWords { + if len(triggerWord) == 0 { + return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.trigger_words.app_error", nil, "", http.StatusBadRequest) + } + } + } + + if len(o.CallbackURLs) == 0 || len(fmt.Sprintf("%s", o.CallbackURLs)) > 1024 { + return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.callback.app_error", nil, "", http.StatusBadRequest) + } + + for _, callback := range o.CallbackURLs { + if !IsValidHttpUrl(callback) { + return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.url.app_error", nil, "", http.StatusBadRequest) + } + } + + if len(o.DisplayName) > 64 { + return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.display_name.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.Description) > 128 { + return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.description.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.ContentType) > 128 { 
+ return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.content_type.app_error", nil, "", http.StatusBadRequest) + } + + if o.TriggerWhen > 1 { + return NewAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.content_type.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (o *OutgoingWebhook) PreSave() { + if o.Id == "" { + o.Id = NewId() + } + + if o.Token == "" { + o.Token = NewId() + } + + o.CreateAt = GetMillis() + o.UpdateAt = o.CreateAt +} + +func (o *OutgoingWebhook) PreUpdate() { + o.UpdateAt = GetMillis() +} + +func (o *OutgoingWebhook) TriggerWordExactMatch(word string) bool { + if len(word) == 0 { + return false + } + + for _, trigger := range o.TriggerWords { + if trigger == word { + return true + } + } + + return false +} + +func (o *OutgoingWebhook) TriggerWordStartsWith(word string) bool { + if len(word) == 0 { + return false + } + + for _, trigger := range o.TriggerWords { + if strings.HasPrefix(word, trigger) { + return true + } + } + + return false +} + +func (o *OutgoingWebhook) GetTriggerWord(word string, isExactMatch bool) (triggerWord string) { + if len(word) == 0 { + return + } + + if isExactMatch { + for _, trigger := range o.TriggerWords { + if trigger == word { + triggerWord = trigger + break + } + } + } else { + for _, trigger := range o.TriggerWords { + if strings.HasPrefix(word, trigger) { + triggerWord = trigger + break + } + } + } + + return triggerWord +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/plugin_key_value.go b/vendor/github.com/mattermost/mattermost-server/model/plugin_key_value.go new file mode 100644 index 00000000..b7a7731c --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/plugin_key_value.go @@ -0,0 +1,32 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "net/http" + "unicode/utf8" +) + +const ( + KEY_VALUE_PLUGIN_ID_MAX_RUNES = 190 + KEY_VALUE_KEY_MAX_RUNES = 50 +) + +type PluginKeyValue struct { + PluginId string `json:"plugin_id"` + Key string `json:"key" db:"PKey"` + Value []byte `json:"value" db:"PValue"` +} + +func (kv *PluginKeyValue) IsValid() *AppError { + if len(kv.PluginId) == 0 || utf8.RuneCountInString(kv.PluginId) > KEY_VALUE_PLUGIN_ID_MAX_RUNES { + return NewAppError("PluginKeyValue.IsValid", "model.plugin_key_value.is_valid.plugin_id.app_error", map[string]interface{}{"Max": KEY_VALUE_KEY_MAX_RUNES, "Min": 0}, "key="+kv.Key, http.StatusBadRequest) + } + + if len(kv.Key) == 0 || utf8.RuneCountInString(kv.Key) > KEY_VALUE_KEY_MAX_RUNES { + return NewAppError("PluginKeyValue.IsValid", "model.plugin_key_value.is_valid.key.app_error", map[string]interface{}{"Max": KEY_VALUE_KEY_MAX_RUNES, "Min": 0}, "key="+kv.Key, http.StatusBadRequest) + } + + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/plugins_response.go b/vendor/github.com/mattermost/mattermost-server/model/plugins_response.go new file mode 100644 index 00000000..b6c01b64 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/plugins_response.go @@ -0,0 +1,30 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package model + +import ( + "encoding/json" + "io" +) + +type PluginInfo struct { + Manifest + Prepackaged bool `json:"prepackaged"` +} + +type PluginsResponse struct { + Active []*PluginInfo `json:"active"` + Inactive []*PluginInfo `json:"inactive"` +} + +func (m *PluginsResponse) ToJson() string { + b, _ := json.Marshal(m) + return string(b) +} + +func PluginsResponseFromJson(data io.Reader) *PluginsResponse { + var m *PluginsResponse + json.NewDecoder(data).Decode(&m) + return m +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/post.go b/vendor/github.com/mattermost/mattermost-server/model/post.go new file mode 100644 index 00000000..4a774b5d --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/post.go @@ -0,0 +1,492 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" + "net/http" + "regexp" + "sort" + "strings" + "unicode/utf8" + + "github.com/mattermost/mattermost-server/utils/markdown" +) + +const ( + POST_SYSTEM_MESSAGE_PREFIX = "system_" + POST_DEFAULT = "" + POST_SLACK_ATTACHMENT = "slack_attachment" + POST_SYSTEM_GENERIC = "system_generic" + POST_JOIN_LEAVE = "system_join_leave" // Deprecated, use POST_JOIN_CHANNEL or POST_LEAVE_CHANNEL instead + POST_JOIN_CHANNEL = "system_join_channel" + POST_LEAVE_CHANNEL = "system_leave_channel" + POST_JOIN_TEAM = "system_join_team" + POST_LEAVE_TEAM = "system_leave_team" + POST_ADD_REMOVE = "system_add_remove" // Deprecated, use POST_ADD_TO_CHANNEL or POST_REMOVE_FROM_CHANNEL instead + POST_ADD_TO_CHANNEL = "system_add_to_channel" + POST_REMOVE_FROM_CHANNEL = "system_remove_from_channel" + POST_MOVE_CHANNEL = "system_move_channel" + POST_ADD_TO_TEAM = "system_add_to_team" + POST_REMOVE_FROM_TEAM = "system_remove_from_team" + POST_HEADER_CHANGE = "system_header_change" + POST_DISPLAYNAME_CHANGE = "system_displayname_change" + POST_PURPOSE_CHANGE = "system_purpose_change" + POST_CHANNEL_DELETED = "system_channel_deleted" + POST_EPHEMERAL = "system_ephemeral" + POST_CHANGE_CHANNEL_PRIVACY = "system_change_chan_privacy" + POST_FILEIDS_MAX_RUNES = 150 + POST_FILENAMES_MAX_RUNES = 4000 + POST_HASHTAGS_MAX_RUNES = 1000 + POST_MESSAGE_MAX_RUNES = 4000 + POST_PROPS_MAX_RUNES = 8000 + POST_PROPS_MAX_USER_RUNES = POST_PROPS_MAX_RUNES - 400 // Leave some room for system / pre-save modifications + POST_CUSTOM_TYPE_PREFIX = "custom_" + PROPS_ADD_CHANNEL_MEMBER = "add_channel_member" +) + +type Post struct { + Id string `json:"id"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + EditAt int64 `json:"edit_at"` + DeleteAt int64 `json:"delete_at"` + IsPinned bool `json:"is_pinned"` + UserId string `json:"user_id"` + ChannelId string `json:"channel_id"` + RootId string `json:"root_id"` + ParentId string `json:"parent_id"` + OriginalId string `json:"original_id"` + + Message string `json:"message"` + + // MessageSource will contain the message as submitted by the user if Message has been modified + // by Mattermost for presentation (e.g if an image proxy is being used). It should be used to + // populate edit boxes if present. 
+ MessageSource string `json:"message_source,omitempty" db:"-"` + + Type string `json:"type"` + Props StringInterface `json:"props"` + Hashtags string `json:"hashtags"` + Filenames StringArray `json:"filenames,omitempty"` // Deprecated, do not use this field any more + FileIds StringArray `json:"file_ids,omitempty"` + PendingPostId string `json:"pending_post_id" db:"-"` + HasReactions bool `json:"has_reactions,omitempty"` +} + +type PostPatch struct { + IsPinned *bool `json:"is_pinned"` + Message *string `json:"message"` + Props *StringInterface `json:"props"` + FileIds *StringArray `json:"file_ids"` + HasReactions *bool `json:"has_reactions"` +} + +func (o *PostPatch) WithRewrittenImageURLs(f func(string) string) *PostPatch { + copy := *o + if copy.Message != nil { + *copy.Message = RewriteImageURLs(*o.Message, f) + } + return © +} + +type PostForIndexing struct { + Post + TeamId string `json:"team_id"` + ParentCreateAt *int64 `json:"parent_create_at"` +} + +type PostAction struct { + Id string `json:"id"` + Name string `json:"name"` + Integration *PostActionIntegration `json:"integration,omitempty"` +} + +type PostActionIntegration struct { + URL string `json:"url,omitempty"` + Context StringInterface `json:"context,omitempty"` +} + +type PostActionIntegrationRequest struct { + UserId string `json:"user_id"` + Context StringInterface `json:"context,omitempty"` +} + +type PostActionIntegrationResponse struct { + Update *Post `json:"update"` + EphemeralText string `json:"ephemeral_text"` +} + +func (o *Post) ToJson() string { + copy := *o + copy.StripActionIntegrations() + b, _ := json.Marshal(©) + return string(b) +} + +func (o *Post) ToUnsanitizedJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func PostFromJson(data io.Reader) *Post { + var o *Post + json.NewDecoder(data).Decode(&o) + return o +} + +func (o *Post) Etag() string { + return Etag(o.Id, o.UpdateAt) +} + +func (o *Post) IsValid() *AppError { + + if len(o.Id) != 26 { + return NewAppError("Post.IsValid", "model.post.is_valid.id.app_error", nil, "", http.StatusBadRequest) + } + + if o.CreateAt == 0 { + return NewAppError("Post.IsValid", "model.post.is_valid.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if o.UpdateAt == 0 { + return NewAppError("Post.IsValid", "model.post.is_valid.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if len(o.UserId) != 26 { + return NewAppError("Post.IsValid", "model.post.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.ChannelId) != 26 { + return NewAppError("Post.IsValid", "model.post.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest) + } + + if !(len(o.RootId) == 26 || len(o.RootId) == 0) { + return NewAppError("Post.IsValid", "model.post.is_valid.root_id.app_error", nil, "", http.StatusBadRequest) + } + + if !(len(o.ParentId) == 26 || len(o.ParentId) == 0) { + return NewAppError("Post.IsValid", "model.post.is_valid.parent_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.ParentId) == 26 && len(o.RootId) == 0 { + return NewAppError("Post.IsValid", "model.post.is_valid.root_parent.app_error", nil, "", http.StatusBadRequest) + } + + if !(len(o.OriginalId) == 26 || len(o.OriginalId) == 0) { + return NewAppError("Post.IsValid", "model.post.is_valid.original_id.app_error", nil, "", http.StatusBadRequest) + } + + if utf8.RuneCountInString(o.Message) > POST_MESSAGE_MAX_RUNES { + return NewAppError("Post.IsValid", "model.post.is_valid.msg.app_error", nil, "id="+o.Id, 
http.StatusBadRequest) + } + + if utf8.RuneCountInString(o.Hashtags) > POST_HASHTAGS_MAX_RUNES { + return NewAppError("Post.IsValid", "model.post.is_valid.hashtags.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + switch o.Type { + case + POST_DEFAULT, + POST_JOIN_LEAVE, + POST_ADD_REMOVE, + POST_JOIN_CHANNEL, + POST_LEAVE_CHANNEL, + POST_JOIN_TEAM, + POST_LEAVE_TEAM, + POST_ADD_TO_CHANNEL, + POST_REMOVE_FROM_CHANNEL, + POST_MOVE_CHANNEL, + POST_ADD_TO_TEAM, + POST_REMOVE_FROM_TEAM, + POST_SLACK_ATTACHMENT, + POST_HEADER_CHANGE, + POST_PURPOSE_CHANGE, + POST_DISPLAYNAME_CHANGE, + POST_CHANNEL_DELETED, + POST_CHANGE_CHANNEL_PRIVACY: + default: + if !strings.HasPrefix(o.Type, POST_CUSTOM_TYPE_PREFIX) { + return NewAppError("Post.IsValid", "model.post.is_valid.type.app_error", nil, "id="+o.Type, http.StatusBadRequest) + } + } + + if utf8.RuneCountInString(ArrayToJson(o.Filenames)) > POST_FILENAMES_MAX_RUNES { + return NewAppError("Post.IsValid", "model.post.is_valid.filenames.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if utf8.RuneCountInString(ArrayToJson(o.FileIds)) > POST_FILEIDS_MAX_RUNES { + return NewAppError("Post.IsValid", "model.post.is_valid.file_ids.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if utf8.RuneCountInString(StringInterfaceToJson(o.Props)) > POST_PROPS_MAX_RUNES { + return NewAppError("Post.IsValid", "model.post.is_valid.props.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + return nil +} + +func (o *Post) SanitizeProps() { + membersToSanitize := []string{ + PROPS_ADD_CHANNEL_MEMBER, + } + + for _, member := range membersToSanitize { + if _, ok := o.Props[member]; ok { + delete(o.Props, member) + } + } +} + +func (o *Post) PreSave() { + if o.Id == "" { + o.Id = NewId() + } + + o.OriginalId = "" + + if o.CreateAt == 0 { + o.CreateAt = GetMillis() + } + + o.UpdateAt = o.CreateAt + o.PreCommit() +} + +func (o *Post) PreCommit() { + if o.Props == nil { + o.Props = make(map[string]interface{}) + } + + if o.Filenames == nil { + o.Filenames = []string{} + } + + if o.FileIds == nil { + o.FileIds = []string{} + } + + o.GenerateActionIds() +} + +func (o *Post) MakeNonNil() { + if o.Props == nil { + o.Props = make(map[string]interface{}) + } +} + +func (o *Post) AddProp(key string, value interface{}) { + + o.MakeNonNil() + + o.Props[key] = value +} + +func (o *Post) IsSystemMessage() bool { + return len(o.Type) >= len(POST_SYSTEM_MESSAGE_PREFIX) && o.Type[:len(POST_SYSTEM_MESSAGE_PREFIX)] == POST_SYSTEM_MESSAGE_PREFIX +} + +func (p *Post) Patch(patch *PostPatch) { + if patch.IsPinned != nil { + p.IsPinned = *patch.IsPinned + } + + if patch.Message != nil { + p.Message = *patch.Message + } + + if patch.Props != nil { + p.Props = *patch.Props + } + + if patch.FileIds != nil { + p.FileIds = *patch.FileIds + } + + if patch.HasReactions != nil { + p.HasReactions = *patch.HasReactions + } +} + +func (o *PostPatch) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } + + return string(b) +} + +func PostPatchFromJson(data io.Reader) *PostPatch { + decoder := json.NewDecoder(data) + var post PostPatch + err := decoder.Decode(&post) + if err != nil { + return nil + } + + return &post +} + +var channelMentionRegexp = regexp.MustCompile(`\B~[a-zA-Z0-9\-_]+`) + +func (o *Post) ChannelMentions() (names []string) { + if strings.Contains(o.Message, "~") { + alreadyMentioned := make(map[string]bool) + for _, match := range channelMentionRegexp.FindAllString(o.Message, -1) { + name := match[1:] + if 
!alreadyMentioned[name] { + names = append(names, name) + alreadyMentioned[name] = true + } + } + } + return +} + +func (r *PostActionIntegrationRequest) ToJson() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (o *Post) Attachments() []*SlackAttachment { + if attachments, ok := o.Props["attachments"].([]*SlackAttachment); ok { + return attachments + } + var ret []*SlackAttachment + if attachments, ok := o.Props["attachments"].([]interface{}); ok { + for _, attachment := range attachments { + if enc, err := json.Marshal(attachment); err == nil { + var decoded SlackAttachment + if json.Unmarshal(enc, &decoded) == nil { + ret = append(ret, &decoded) + } + } + } + } + return ret +} + +func (o *Post) StripActionIntegrations() { + attachments := o.Attachments() + if o.Props["attachments"] != nil { + o.Props["attachments"] = attachments + } + for _, attachment := range attachments { + for _, action := range attachment.Actions { + action.Integration = nil + } + } +} + +func (o *Post) GetAction(id string) *PostAction { + for _, attachment := range o.Attachments() { + for _, action := range attachment.Actions { + if action.Id == id { + return action + } + } + } + return nil +} + +func (o *Post) GenerateActionIds() { + if o.Props["attachments"] != nil { + o.Props["attachments"] = o.Attachments() + } + if attachments, ok := o.Props["attachments"].([]*SlackAttachment); ok { + for _, attachment := range attachments { + for _, action := range attachment.Actions { + if action.Id == "" { + action.Id = NewId() + } + } + } + } +} + +var markdownDestinationEscaper = strings.NewReplacer( + `\`, `\\`, + `<`, `\<`, + `>`, `\>`, + `(`, `\(`, + `)`, `\)`, +) + +// WithRewrittenImageURLs returns a new shallow copy of the post where the message has been +// rewritten via RewriteImageURLs. +func (o *Post) WithRewrittenImageURLs(f func(string) string) *Post { + copy := *o + copy.Message = RewriteImageURLs(o.Message, f) + if copy.MessageSource == "" && copy.Message != o.Message { + copy.MessageSource = o.Message + } + return © +} + +// RewriteImageURLs takes a message and returns a copy that has all of the image URLs replaced +// according to the function f. For each image URL, f will be invoked, and the resulting markdown +// will contain the URL returned by that invocation instead. +// +// Image URLs are destination URLs used in inline images or reference definitions that are used +// anywhere in the input markdown as an image. 
+func RewriteImageURLs(message string, f func(string) string) string { + if !strings.Contains(message, "![") { + return message + } + + var ranges []markdown.Range + + markdown.Inspect(message, func(blockOrInline interface{}) bool { + switch v := blockOrInline.(type) { + case *markdown.ReferenceImage: + ranges = append(ranges, v.ReferenceDefinition.RawDestination) + case *markdown.InlineImage: + ranges = append(ranges, v.RawDestination) + default: + return true + } + return true + }) + + if ranges == nil { + return message + } + + sort.Slice(ranges, func(i, j int) bool { + return ranges[i].Position < ranges[j].Position + }) + + copyRanges := make([]markdown.Range, 0, len(ranges)) + urls := make([]string, 0, len(ranges)) + resultLength := len(message) + + start := 0 + for i, r := range ranges { + switch { + case i == 0: + case r.Position != ranges[i-1].Position: + start = ranges[i-1].End + default: + continue + } + original := message[r.Position:r.End] + replacement := markdownDestinationEscaper.Replace(f(markdown.Unescape(original))) + resultLength += len(replacement) - len(original) + copyRanges = append(copyRanges, markdown.Range{Position: start, End: r.Position}) + urls = append(urls, replacement) + } + + result := make([]byte, resultLength) + + offset := 0 + for i, r := range copyRanges { + offset += copy(result[offset:], message[r.Position:r.End]) + offset += copy(result[offset:], urls[i]) + } + copy(result[offset:], message[ranges[len(ranges)-1].End:]) + + return string(result) +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/post_list.go b/vendor/github.com/mattermost/mattermost-server/model/post_list.go new file mode 100644 index 00000000..27c22e7b --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/post_list.go @@ -0,0 +1,138 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package model + +import ( + "encoding/json" + "io" + "sort" +) + +type PostList struct { + Order []string `json:"order"` + Posts map[string]*Post `json:"posts"` +} + +func NewPostList() *PostList { + return &PostList{ + Order: make([]string, 0), + Posts: make(map[string]*Post), + } +} + +func (o *PostList) WithRewrittenImageURLs(f func(string) string) *PostList { + copy := *o + copy.Posts = make(map[string]*Post) + for id, post := range o.Posts { + copy.Posts[id] = post.WithRewrittenImageURLs(f) + } + return © +} + +func (o *PostList) StripActionIntegrations() { + posts := o.Posts + o.Posts = make(map[string]*Post) + for id, post := range posts { + pcopy := *post + pcopy.StripActionIntegrations() + o.Posts[id] = &pcopy + } +} + +func (o *PostList) ToJson() string { + copy := *o + copy.StripActionIntegrations() + b, err := json.Marshal(©) + if err != nil { + return "" + } else { + return string(b) + } +} + +func (o *PostList) MakeNonNil() { + if o.Order == nil { + o.Order = make([]string, 0) + } + + if o.Posts == nil { + o.Posts = make(map[string]*Post) + } + + for _, v := range o.Posts { + v.MakeNonNil() + } +} + +func (o *PostList) AddOrder(id string) { + + if o.Order == nil { + o.Order = make([]string, 0, 128) + } + + o.Order = append(o.Order, id) +} + +func (o *PostList) AddPost(post *Post) { + + if o.Posts == nil { + o.Posts = make(map[string]*Post) + } + + o.Posts[post.Id] = post +} + +func (o *PostList) Extend(other *PostList) { + for _, postId := range other.Order { + if _, ok := o.Posts[postId]; !ok { + o.AddPost(other.Posts[postId]) + o.AddOrder(postId) + } + } +} + +func (o *PostList) SortByCreateAt() { + sort.Slice(o.Order, func(i, j int) bool { + return o.Posts[o.Order[i]].CreateAt > o.Posts[o.Order[j]].CreateAt + }) +} + +func (o *PostList) Etag() string { + + id := "0" + var t int64 = 0 + + for _, v := range o.Posts { + if v.UpdateAt > t { + t = v.UpdateAt + id = v.Id + } else if v.UpdateAt == t && v.Id > id { + t = v.UpdateAt + id = v.Id + } + } + + orderId := "" + if len(o.Order) > 0 { + orderId = o.Order[0] + } + + return Etag(orderId, id, t) +} + +func (o *PostList) IsChannelId(channelId string) bool { + for _, v := range o.Posts { + if v.ChannelId != channelId { + return false + } + } + + return true +} + +func PostListFromJson(data io.Reader) *PostList { + var o *PostList + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/preference.go b/vendor/github.com/mattermost/mattermost-server/model/preference.go new file mode 100644 index 00000000..dc97314c --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/preference.go @@ -0,0 +1,113 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package model + +import ( + "encoding/json" + "io" + "net/http" + "regexp" + "strings" + "unicode/utf8" +) + +const ( + PREFERENCE_CATEGORY_DIRECT_CHANNEL_SHOW = "direct_channel_show" + PREFERENCE_CATEGORY_TUTORIAL_STEPS = "tutorial_step" + PREFERENCE_CATEGORY_ADVANCED_SETTINGS = "advanced_settings" + PREFERENCE_CATEGORY_FLAGGED_POST = "flagged_post" + PREFERENCE_CATEGORY_FAVORITE_CHANNEL = "favorite_channel" + + PREFERENCE_CATEGORY_DISPLAY_SETTINGS = "display_settings" + PREFERENCE_NAME_COLLAPSE_SETTING = "collapse_previews" + + PREFERENCE_CATEGORY_THEME = "theme" + // the name for theme props is the team id + + PREFERENCE_CATEGORY_AUTHORIZED_OAUTH_APP = "oauth_app" + // the name for oauth_app is the client_id and value is the current scope + + PREFERENCE_CATEGORY_LAST = "last" + PREFERENCE_NAME_LAST_CHANNEL = "channel" + PREFERENCE_NAME_LAST_TEAM = "team" + + PREFERENCE_CATEGORY_NOTIFICATIONS = "notifications" + PREFERENCE_NAME_EMAIL_INTERVAL = "email_interval" + + PREFERENCE_EMAIL_INTERVAL_NO_BATCHING_SECONDS = "30" // the "immediate" setting is actually 30s + PREFERENCE_EMAIL_INTERVAL_BATCHING_SECONDS = "900" // fifteen minutes is 900 seconds +) + +type Preference struct { + UserId string `json:"user_id"` + Category string `json:"category"` + Name string `json:"name"` + Value string `json:"value"` +} + +func (o *Preference) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func PreferenceFromJson(data io.Reader) *Preference { + var o *Preference + json.NewDecoder(data).Decode(&o) + return o +} + +func (o *Preference) IsValid() *AppError { + if len(o.UserId) != 26 { + return NewAppError("Preference.IsValid", "model.preference.is_valid.id.app_error", nil, "user_id="+o.UserId, http.StatusBadRequest) + } + + if len(o.Category) == 0 || len(o.Category) > 32 { + return NewAppError("Preference.IsValid", "model.preference.is_valid.category.app_error", nil, "category="+o.Category, http.StatusBadRequest) + } + + if len(o.Name) > 32 { + return NewAppError("Preference.IsValid", "model.preference.is_valid.name.app_error", nil, "name="+o.Name, http.StatusBadRequest) + } + + if utf8.RuneCountInString(o.Value) > 2000 { + return NewAppError("Preference.IsValid", "model.preference.is_valid.value.app_error", nil, "value="+o.Value, http.StatusBadRequest) + } + + if o.Category == PREFERENCE_CATEGORY_THEME { + var unused map[string]string + if err := json.NewDecoder(strings.NewReader(o.Value)).Decode(&unused); err != nil { + return NewAppError("Preference.IsValid", "model.preference.is_valid.theme.app_error", nil, "value="+o.Value, http.StatusBadRequest) + } + } + + return nil +} + +func (o *Preference) PreUpdate() { + if o.Category == PREFERENCE_CATEGORY_THEME { + // decode the value of theme (a map of strings to string) and eliminate any invalid values + var props map[string]string + if err := json.NewDecoder(strings.NewReader(o.Value)).Decode(&props); err != nil { + // just continue, the invalid preference value should get caught by IsValid before saving + return + } + + colorPattern := regexp.MustCompile(`^#[0-9a-fA-F]{3}([0-9a-fA-F]{3})?$`) + + // blank out any invalid theme values + for name, value := range props { + if name == "image" || name == "type" || name == "codeTheme" { + continue + } + + if !colorPattern.MatchString(value) { + props[name] = "#ffffff" + } + } + + if b, err := json.Marshal(props); err == nil { + o.Value = string(b) + } + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/preferences.go 
b/vendor/github.com/mattermost/mattermost-server/model/preferences.go new file mode 100644 index 00000000..172e1aa8 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/preferences.go @@ -0,0 +1,27 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type Preferences []Preference + +func (o *Preferences) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func PreferencesFromJson(data io.Reader) (Preferences, error) { + decoder := json.NewDecoder(data) + var o Preferences + err := decoder.Decode(&o) + if err == nil { + return o, nil + } else { + return nil, err + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/push_notification.go b/vendor/github.com/mattermost/mattermost-server/model/push_notification.go new file mode 100644 index 00000000..0d7ba77a --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/push_notification.go @@ -0,0 +1,68 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" + "strings" +) + +const ( + PUSH_NOTIFY_APPLE = "apple" + PUSH_NOTIFY_ANDROID = "android" + PUSH_NOTIFY_APPLE_REACT_NATIVE = "apple_rn" + PUSH_NOTIFY_ANDROID_REACT_NATIVE = "android_rn" + + PUSH_TYPE_MESSAGE = "message" + PUSH_TYPE_CLEAR = "clear" + + // The category is set to handle a set of interactive Actions + // with the push notifications + CATEGORY_CAN_REPLY = "CAN_REPLY" + + MHPNS = "https://push.mattermost.com" +) + +type PushNotification struct { + Platform string `json:"platform"` + ServerId string `json:"server_id"` + DeviceId string `json:"device_id"` + Category string `json:"category"` + Sound string `json:"sound"` + Message string `json:"message"` + Badge int `json:"badge"` + ContentAvailable int `json:"cont_ava"` + TeamId string `json:"team_id"` + ChannelId string `json:"channel_id"` + PostId string `json:"post_id"` + RootId string `json:"root_id"` + ChannelName string `json:"channel_name"` + Type string `json:"type"` + SenderId string `json:"sender_id"` + OverrideUsername string `json:"override_username"` + OverrideIconUrl string `json:"override_icon_url"` + FromWebhook string `json:"from_webhook"` +} + +func (me *PushNotification) ToJson() string { + b, _ := json.Marshal(me) + return string(b) +} + +func (me *PushNotification) SetDeviceIdAndPlatform(deviceId string) { + + index := strings.Index(deviceId, ":") + + if index > -1 { + me.Platform = deviceId[:index] + me.DeviceId = deviceId[index+1:] + } +} + +func PushNotificationFromJson(data io.Reader) *PushNotification { + var me *PushNotification + json.NewDecoder(data).Decode(&me) + return me +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/push_response.go b/vendor/github.com/mattermost/mattermost-server/model/push_response.go new file mode 100644 index 00000000..1434a2b1 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/push_response.go @@ -0,0 +1,54 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package model + +import ( + "encoding/json" + "io" +) + +const ( + PUSH_STATUS = "status" + PUSH_STATUS_OK = "OK" + PUSH_STATUS_FAIL = "FAIL" + PUSH_STATUS_REMOVE = "REMOVE" + PUSH_STATUS_ERROR_MSG = "error" +) + +type PushResponse map[string]string + +func NewOkPushResponse() PushResponse { + m := make(map[string]string) + m[PUSH_STATUS] = PUSH_STATUS_OK + return m +} + +func NewRemovePushResponse() PushResponse { + m := make(map[string]string) + m[PUSH_STATUS] = PUSH_STATUS_REMOVE + return m +} + +func NewErrorPushResponse(message string) PushResponse { + m := make(map[string]string) + m[PUSH_STATUS] = PUSH_STATUS_FAIL + m[PUSH_STATUS_ERROR_MSG] = message + return m +} + +func (me *PushResponse) ToJson() string { + b, _ := json.Marshal(me) + return string(b) +} + +func PushResponseFromJson(data io.Reader) PushResponse { + decoder := json.NewDecoder(data) + + var objmap PushResponse + if err := decoder.Decode(&objmap); err != nil { + return make(map[string]string) + } else { + return objmap + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/reaction.go b/vendor/github.com/mattermost/mattermost-server/model/reaction.go new file mode 100644 index 00000000..c1b9c499 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/reaction.go @@ -0,0 +1,76 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" + "net/http" + "regexp" +) + +type Reaction struct { + UserId string `json:"user_id"` + PostId string `json:"post_id"` + EmojiName string `json:"emoji_name"` + CreateAt int64 `json:"create_at"` +} + +func (o *Reaction) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func ReactionFromJson(data io.Reader) *Reaction { + var o Reaction + + if err := json.NewDecoder(data).Decode(&o); err != nil { + return nil + } else { + return &o + } +} + +func ReactionsToJson(o []*Reaction) string { + b, _ := json.Marshal(o) + return string(b) +} + +func ReactionsFromJson(data io.Reader) []*Reaction { + var o []*Reaction + + if err := json.NewDecoder(data).Decode(&o); err != nil { + return nil + } else { + return o + } +} + +func (o *Reaction) IsValid() *AppError { + if len(o.UserId) != 26 { + return NewAppError("Reaction.IsValid", "model.reaction.is_valid.user_id.app_error", nil, "user_id="+o.UserId, http.StatusBadRequest) + } + + if len(o.PostId) != 26 { + return NewAppError("Reaction.IsValid", "model.reaction.is_valid.post_id.app_error", nil, "post_id="+o.PostId, http.StatusBadRequest) + } + + validName := regexp.MustCompile(`^[a-zA-Z0-9\-\+_]+$`) + + if len(o.EmojiName) == 0 || len(o.EmojiName) > EMOJI_NAME_MAX_LENGTH || !validName.MatchString(o.EmojiName) { + return NewAppError("Reaction.IsValid", "model.reaction.is_valid.emoji_name.app_error", nil, "emoji_name="+o.EmojiName, http.StatusBadRequest) + } + + if o.CreateAt == 0 { + return NewAppError("Reaction.IsValid", "model.reaction.is_valid.create_at.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (o *Reaction) PreSave() { + if o.CreateAt == 0 { + o.CreateAt = GetMillis() + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/saml.go b/vendor/github.com/mattermost/mattermost-server/model/saml.go new file mode 100644 index 00000000..e7475015 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/saml.go @@ -0,0 +1,40 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. 
+// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +const ( + USER_AUTH_SERVICE_SAML = "saml" + USER_AUTH_SERVICE_SAML_TEXT = "With SAML" + SAML_IDP_CERTIFICATE = 1 + SAML_PRIVATE_KEY = 2 + SAML_PUBLIC_CERT = 3 +) + +type SamlAuthRequest struct { + Base64AuthRequest string + URL string + RelayState string +} + +type SamlCertificateStatus struct { + IdpCertificateFile bool `json:"idp_certificate_file"` + PrivateKeyFile bool `json:"private_key_file"` + PublicCertificateFile bool `json:"public_certificate_file"` +} + +func (s *SamlCertificateStatus) ToJson() string { + b, _ := json.Marshal(s) + return string(b) +} + +func SamlCertificateStatusFromJson(data io.Reader) *SamlCertificateStatus { + var status *SamlCertificateStatus + json.NewDecoder(data).Decode(&status) + return status +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/scheduled_task.go b/vendor/github.com/mattermost/mattermost-server/model/scheduled_task.go new file mode 100644 index 00000000..453828bd --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/scheduled_task.go @@ -0,0 +1,110 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "fmt" + "sync" + "time" +) + +type TaskFunc func() + +type ScheduledTask struct { + Name string `json:"name"` + Interval time.Duration `json:"interval"` + Recurring bool `json:"recurring"` + function TaskFunc + timer *time.Timer +} + +var taskMutex = sync.Mutex{} +var tasks = make(map[string]*ScheduledTask) + +func addTask(task *ScheduledTask) { + taskMutex.Lock() + defer taskMutex.Unlock() + tasks[task.Name] = task +} + +func removeTaskByName(name string) { + taskMutex.Lock() + defer taskMutex.Unlock() + delete(tasks, name) +} + +func GetTaskByName(name string) *ScheduledTask { + taskMutex.Lock() + defer taskMutex.Unlock() + if task, ok := tasks[name]; ok { + return task + } + return nil +} + +func GetAllTasks() *map[string]*ScheduledTask { + taskMutex.Lock() + defer taskMutex.Unlock() + return &tasks +} + +func CreateTask(name string, function TaskFunc, timeToExecution time.Duration) *ScheduledTask { + task := &ScheduledTask{ + Name: name, + Interval: timeToExecution, + Recurring: false, + function: function, + } + + taskRunner := func() { + go task.function() + removeTaskByName(task.Name) + } + + task.timer = time.AfterFunc(timeToExecution, taskRunner) + + addTask(task) + + return task +} + +func CreateRecurringTask(name string, function TaskFunc, interval time.Duration) *ScheduledTask { + task := &ScheduledTask{ + Name: name, + Interval: interval, + Recurring: true, + function: function, + } + + taskRecurer := func() { + go task.function() + task.timer.Reset(task.Interval) + } + + task.timer = time.AfterFunc(interval, taskRecurer) + + addTask(task) + + return task +} + +func (task *ScheduledTask) Cancel() { + task.timer.Stop() + removeTaskByName(task.Name) +} + +// Executes the task immediatly. A recurring task will be run regularally after interval. 
+func (task *ScheduledTask) Execute() { + task.function() + task.timer.Reset(task.Interval) +} + +func (task *ScheduledTask) String() string { + return fmt.Sprintf( + "%s\nInterval: %s\nRecurring: %t\n", + task.Name, + task.Interval.String(), + task.Recurring, + ) +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/search_params.go b/vendor/github.com/mattermost/mattermost-server/model/search_params.go new file mode 100644 index 00000000..1692b3aa --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/search_params.go @@ -0,0 +1,171 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "regexp" + "strings" +) + +var searchTermPuncStart = regexp.MustCompile(`^[^\pL\d\s#"]+`) +var searchTermPuncEnd = regexp.MustCompile(`[^\pL\d\s*"]+$`) + +type SearchParams struct { + Terms string + IsHashtag bool + InChannels []string + FromUsers []string + OrTerms bool +} + +func (o *SearchParams) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +var searchFlags = [...]string{"from", "channel", "in"} + +func splitWords(text string) []string { + words := []string{} + + foundQuote := false + location := 0 + for i, char := range text { + if char == '"' { + if foundQuote { + // Grab the quoted section + word := text[location : i+1] + words = append(words, word) + foundQuote = false + location = i + 1 + } else { + words = append(words, strings.Fields(text[location:i])...) + foundQuote = true + location = i + } + } + } + + words = append(words, strings.Fields(text[location:])...) + + return words +} + +func parseSearchFlags(input []string) ([]string, [][2]string) { + words := []string{} + flags := [][2]string{} + + skipNextWord := false + for i, word := range input { + if skipNextWord { + skipNextWord = false + continue + } + + isFlag := false + + if colon := strings.Index(word, ":"); colon != -1 { + flag := word[:colon] + value := word[colon+1:] + + for _, searchFlag := range searchFlags { + // check for case insensitive equality + if strings.EqualFold(flag, searchFlag) { + if value != "" { + flags = append(flags, [2]string{searchFlag, value}) + isFlag = true + } else if i < len(input)-1 { + flags = append(flags, [2]string{searchFlag, input[i+1]}) + skipNextWord = true + isFlag = true + } + + if isFlag { + break + } + } + } + } + + if !isFlag { + // trim off surrounding punctuation (note that we leave trailing asterisks to allow wildcards) + word = searchTermPuncStart.ReplaceAllString(word, "") + word = searchTermPuncEnd.ReplaceAllString(word, "") + + // and remove extra pound #s + word = hashtagStart.ReplaceAllString(word, "#") + + if len(word) != 0 { + words = append(words, word) + } + } + } + + return words, flags +} + +func ParseSearchParams(text string) []*SearchParams { + words, flags := parseSearchFlags(splitWords(text)) + + hashtagTermList := []string{} + plainTermList := []string{} + + for _, word := range words { + if validHashtag.MatchString(word) { + hashtagTermList = append(hashtagTermList, word) + } else { + plainTermList = append(plainTermList, word) + } + } + + hashtagTerms := strings.Join(hashtagTermList, " ") + plainTerms := strings.Join(plainTermList, " ") + + inChannels := []string{} + fromUsers := []string{} + + for _, flagPair := range flags { + flag := flagPair[0] + value := flagPair[1] + + if flag == "in" || flag == "channel" { + inChannels = append(inChannels, value) + } else if flag == "from" { + fromUsers = 
append(fromUsers, value) + } + } + + paramsList := []*SearchParams{} + + if len(plainTerms) > 0 { + paramsList = append(paramsList, &SearchParams{ + Terms: plainTerms, + IsHashtag: false, + InChannels: inChannels, + FromUsers: fromUsers, + }) + } + + if len(hashtagTerms) > 0 { + paramsList = append(paramsList, &SearchParams{ + Terms: hashtagTerms, + IsHashtag: true, + InChannels: inChannels, + FromUsers: fromUsers, + }) + } + + // special case for when no terms are specified but we still have a filter + if len(plainTerms) == 0 && len(hashtagTerms) == 0 && (len(inChannels) != 0 || len(fromUsers) != 0) { + paramsList = append(paramsList, &SearchParams{ + Terms: "", + IsHashtag: false, + InChannels: inChannels, + FromUsers: fromUsers, + }) + } + + return paramsList +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/security_bulletin.go b/vendor/github.com/mattermost/mattermost-server/model/security_bulletin.go new file mode 100644 index 00000000..958b9c9e --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/security_bulletin.go @@ -0,0 +1,41 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type SecurityBulletin struct { + Id string `json:"id"` + AppliesToVersion string `json:"applies_to_version"` +} + +type SecurityBulletins []SecurityBulletin + +func (me *SecurityBulletin) ToJson() string { + b, _ := json.Marshal(me) + return string(b) +} + +func SecurityBulletinFromJson(data io.Reader) *SecurityBulletin { + var o *SecurityBulletin + json.NewDecoder(data).Decode(&o) + return o +} + +func (me SecurityBulletins) ToJson() string { + if b, err := json.Marshal(me); err != nil { + return "[]" + } else { + return string(b) + } +} + +func SecurityBulletinsFromJson(data io.Reader) SecurityBulletins { + var o SecurityBulletins + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/session.go b/vendor/github.com/mattermost/mattermost-server/model/session.go new file mode 100644 index 00000000..a407af26 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/session.go @@ -0,0 +1,137 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package model + +import ( + "encoding/json" + "io" + "strings" +) + +const ( + SESSION_COOKIE_TOKEN = "MMAUTHTOKEN" + SESSION_COOKIE_USER = "MMUSERID" + SESSION_CACHE_SIZE = 35000 + SESSION_PROP_PLATFORM = "platform" + SESSION_PROP_OS = "os" + SESSION_PROP_BROWSER = "browser" + SESSION_PROP_TYPE = "type" + SESSION_PROP_USER_ACCESS_TOKEN_ID = "user_access_token_id" + SESSION_TYPE_USER_ACCESS_TOKEN = "UserAccessToken" + SESSION_ACTIVITY_TIMEOUT = 1000 * 60 * 5 // 5 minutes + SESSION_USER_ACCESS_TOKEN_EXPIRY = 100 * 365 // 100 years +) + +type Session struct { + Id string `json:"id"` + Token string `json:"token"` + CreateAt int64 `json:"create_at"` + ExpiresAt int64 `json:"expires_at"` + LastActivityAt int64 `json:"last_activity_at"` + UserId string `json:"user_id"` + DeviceId string `json:"device_id"` + Roles string `json:"roles"` + IsOAuth bool `json:"is_oauth"` + Props StringMap `json:"props"` + TeamMembers []*TeamMember `json:"team_members" db:"-"` +} + +func (me *Session) DeepCopy() *Session { + copy := *me + return © +} + +func (me *Session) ToJson() string { + b, _ := json.Marshal(me) + return string(b) +} + +func SessionFromJson(data io.Reader) *Session { + var me *Session + json.NewDecoder(data).Decode(&me) + return me +} + +func (me *Session) PreSave() { + if me.Id == "" { + me.Id = NewId() + } + + if me.Token == "" { + me.Token = NewId() + } + + me.CreateAt = GetMillis() + me.LastActivityAt = me.CreateAt + + if me.Props == nil { + me.Props = make(map[string]string) + } +} + +func (me *Session) Sanitize() { + me.Token = "" +} + +func (me *Session) IsExpired() bool { + + if me.ExpiresAt <= 0 { + return false + } + + if GetMillis() > me.ExpiresAt { + return true + } + + return false +} + +func (me *Session) SetExpireInDays(days int) { + if me.CreateAt == 0 { + me.ExpiresAt = GetMillis() + (1000 * 60 * 60 * 24 * int64(days)) + } else { + me.ExpiresAt = me.CreateAt + (1000 * 60 * 60 * 24 * int64(days)) + } +} + +func (me *Session) AddProp(key string, value string) { + + if me.Props == nil { + me.Props = make(map[string]string) + } + + me.Props[key] = value +} + +func (me *Session) GetTeamByTeamId(teamId string) *TeamMember { + for _, team := range me.TeamMembers { + if team.TeamId == teamId { + return team + } + } + + return nil +} + +func (me *Session) IsMobileApp() bool { + return len(me.DeviceId) > 0 +} + +func (me *Session) GetUserRoles() []string { + return strings.Fields(me.Roles) +} + +func SessionsToJson(o []*Session) string { + if b, err := json.Marshal(o); err != nil { + return "[]" + } else { + return string(b) + } +} + +func SessionsFromJson(data io.Reader) []*Session { + var o []*Session + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/slack_attachment.go b/vendor/github.com/mattermost/mattermost-server/model/slack_attachment.go new file mode 100644 index 00000000..197d3f0f --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/slack_attachment.go @@ -0,0 +1,59 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package model + +import ( + "fmt" +) + +type SlackAttachment struct { + Id int64 `json:"id"` + Fallback string `json:"fallback"` + Color string `json:"color"` + Pretext string `json:"pretext"` + AuthorName string `json:"author_name"` + AuthorLink string `json:"author_link"` + AuthorIcon string `json:"author_icon"` + Title string `json:"title"` + TitleLink string `json:"title_link"` + Text string `json:"text"` + Fields []*SlackAttachmentField `json:"fields"` + ImageURL string `json:"image_url"` + ThumbURL string `json:"thumb_url"` + Footer string `json:"footer"` + FooterIcon string `json:"footer_icon"` + Timestamp interface{} `json:"ts"` // This is either a string or an int64 + Actions []*PostAction `json:"actions,omitempty"` +} + +type SlackAttachmentField struct { + Title string `json:"title"` + Value interface{} `json:"value"` + Short bool `json:"short"` +} + +func StringifySlackFieldValue(a []*SlackAttachment) []*SlackAttachment { + var nonNilAttachments []*SlackAttachment + for _, attachment := range a { + if attachment == nil { + continue + } + nonNilAttachments = append(nonNilAttachments, attachment) + + var nonNilFields []*SlackAttachmentField + for _, field := range attachment.Fields { + if field == nil { + continue + } + nonNilFields = append(nonNilFields, field) + + if field.Value != nil { + // Ensure the value is set to a string if it is set + field.Value = fmt.Sprintf("%v", field.Value) + } + } + attachment.Fields = nonNilFields + } + return nonNilAttachments +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/status.go b/vendor/github.com/mattermost/mattermost-server/model/status.go new file mode 100644 index 00000000..cd9e32ed --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/status.go @@ -0,0 +1,60 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +const ( + STATUS_OFFLINE = "offline" + STATUS_AWAY = "away" + STATUS_DND = "dnd" + STATUS_ONLINE = "online" + STATUS_CACHE_SIZE = SESSION_CACHE_SIZE + STATUS_CHANNEL_TIMEOUT = 20000 // 20 seconds + STATUS_MIN_UPDATE_TIME = 120000 // 2 minutes +) + +type Status struct { + UserId string `json:"user_id"` + Status string `json:"status"` + Manual bool `json:"manual"` + LastActivityAt int64 `json:"last_activity_at"` + ActiveChannel string `json:"-" db:"-"` +} + +func (o *Status) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func StatusFromJson(data io.Reader) *Status { + var o *Status + json.NewDecoder(data).Decode(&o) + return o +} + +func StatusListToJson(u []*Status) string { + b, _ := json.Marshal(u) + return string(b) +} + +func StatusListFromJson(data io.Reader) []*Status { + var statuses []*Status + json.NewDecoder(data).Decode(&statuses) + return statuses +} + +func StatusMapToInterfaceMap(statusMap map[string]*Status) map[string]interface{} { + interfaceMap := map[string]interface{}{} + for _, s := range statusMap { + // Omitted statues mean offline + if s.Status != STATUS_OFFLINE { + interfaceMap[s.UserId] = s.Status + } + } + return interfaceMap +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/suggest_command.go b/vendor/github.com/mattermost/mattermost-server/model/suggest_command.go new file mode 100644 index 00000000..44f46bf7 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/suggest_command.go @@ -0,0 +1,25 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. 
+// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type SuggestCommand struct { + Suggestion string `json:"suggestion"` + Description string `json:"description"` +} + +func (o *SuggestCommand) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func SuggestCommandFromJson(data io.Reader) *SuggestCommand { + var o *SuggestCommand + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/switch_request.go b/vendor/github.com/mattermost/mattermost-server/model/switch_request.go new file mode 100644 index 00000000..e153c92f --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/switch_request.go @@ -0,0 +1,53 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type SwitchRequest struct { + CurrentService string `json:"current_service"` + NewService string `json:"new_service"` + Email string `json:"email"` + Password string `json:"password"` + NewPassword string `json:"new_password"` + MfaCode string `json:"mfa_code"` + LdapId string `json:"ldap_id"` +} + +func (o *SwitchRequest) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func SwitchRequestFromJson(data io.Reader) *SwitchRequest { + var o *SwitchRequest + json.NewDecoder(data).Decode(&o) + return o +} + +func (o *SwitchRequest) EmailToOAuth() bool { + return o.CurrentService == USER_AUTH_SERVICE_EMAIL && + (o.NewService == USER_AUTH_SERVICE_SAML || + o.NewService == USER_AUTH_SERVICE_GITLAB || + o.NewService == SERVICE_GOOGLE || + o.NewService == SERVICE_OFFICE365) +} + +func (o *SwitchRequest) OAuthToEmail() bool { + return (o.CurrentService == USER_AUTH_SERVICE_SAML || + o.CurrentService == USER_AUTH_SERVICE_GITLAB || + o.CurrentService == SERVICE_GOOGLE || + o.CurrentService == SERVICE_OFFICE365) && o.NewService == USER_AUTH_SERVICE_EMAIL +} + +func (o *SwitchRequest) EmailToLdap() bool { + return o.CurrentService == USER_AUTH_SERVICE_EMAIL && o.NewService == USER_AUTH_SERVICE_LDAP +} + +func (o *SwitchRequest) LdapToEmail() bool { + return o.CurrentService == USER_AUTH_SERVICE_LDAP && o.NewService == USER_AUTH_SERVICE_EMAIL +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/system.go b/vendor/github.com/mattermost/mattermost-server/model/system.go new file mode 100644 index 00000000..2a636b14 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/system.go @@ -0,0 +1,46 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
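The SwitchRequest helpers above are plain comparisons against the auth-service constants defined elsewhere in this package (USER_AUTH_SERVICE_LDAP, USER_AUTH_SERVICE_GITLAB, and so on). A rough usage sketch with made-up credentials:

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	req := &model.SwitchRequest{
		CurrentService: model.USER_AUTH_SERVICE_EMAIL,
		NewService:     model.USER_AUTH_SERVICE_LDAP,
		Email:          "user@example.com",
		Password:       "current-password",
		LdapId:         "uid=user,ou=people",
	}

	// Only one of the direction checks should hold for a given request.
	fmt.Println(req.EmailToLdap(), req.LdapToEmail(), req.EmailToOAuth()) // true false false
}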
+ +package model + +import ( + "encoding/json" + "io" + "math/big" +) + +const ( + SYSTEM_DIAGNOSTIC_ID = "DiagnosticId" + SYSTEM_RAN_UNIT_TESTS = "RanUnitTests" + SYSTEM_LAST_SECURITY_TIME = "LastSecurityTime" + SYSTEM_ACTIVE_LICENSE_ID = "ActiveLicenseId" + SYSTEM_LAST_COMPLIANCE_TIME = "LastComplianceTime" + SYSTEM_ASYMMETRIC_SIGNING_KEY = "AsymmetricSigningKey" +) + +type System struct { + Name string `json:"name"` + Value string `json:"value"` +} + +func (o *System) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func SystemFromJson(data io.Reader) *System { + var o *System + json.NewDecoder(data).Decode(&o) + return o +} + +type SystemAsymmetricSigningKey struct { + ECDSAKey *SystemECDSAKey `json:"ecdsa_key,omitempty"` +} + +type SystemECDSAKey struct { + Curve string `json:"curve"` + X *big.Int `json:"x"` + Y *big.Int `json:"y"` + D *big.Int `json:"d,omitempty"` +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/team.go b/vendor/github.com/mattermost/mattermost-server/model/team.go new file mode 100644 index 00000000..5b6eb1fa --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/team.go @@ -0,0 +1,294 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "regexp" + "strings" + "unicode/utf8" +) + +const ( + TEAM_OPEN = "O" + TEAM_INVITE = "I" + TEAM_ALLOWED_DOMAINS_MAX_LENGTH = 500 + TEAM_COMPANY_NAME_MAX_LENGTH = 64 + TEAM_DESCRIPTION_MAX_LENGTH = 255 + TEAM_DISPLAY_NAME_MAX_RUNES = 64 + TEAM_EMAIL_MAX_LENGTH = 128 + TEAM_NAME_MAX_LENGTH = 64 + TEAM_NAME_MIN_LENGTH = 2 +) + +type Team struct { + Id string `json:"id"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + DisplayName string `json:"display_name"` + Name string `json:"name"` + Description string `json:"description"` + Email string `json:"email"` + Type string `json:"type"` + CompanyName string `json:"company_name"` + AllowedDomains string `json:"allowed_domains"` + InviteId string `json:"invite_id"` + AllowOpenInvite bool `json:"allow_open_invite"` +} + +type TeamPatch struct { + DisplayName *string `json:"display_name"` + Description *string `json:"description"` + CompanyName *string `json:"company_name"` + InviteId *string `json:"invite_id"` + AllowOpenInvite *bool `json:"allow_open_invite"` +} + +type Invites struct { + Invites []map[string]string `json:"invites"` +} + +func InvitesFromJson(data io.Reader) *Invites { + var o *Invites + json.NewDecoder(data).Decode(&o) + return o +} + +func (o *Invites) ToEmailList() []string { + emailList := make([]string, len(o.Invites)) + for _, invite := range o.Invites { + emailList = append(emailList, invite["email"]) + } + return emailList +} + +func (o *Invites) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func (o *Team) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func TeamFromJson(data io.Reader) *Team { + var o *Team + json.NewDecoder(data).Decode(&o) + return o +} + +func TeamMapToJson(u map[string]*Team) string { + b, _ := json.Marshal(u) + return string(b) +} + +func TeamMapFromJson(data io.Reader) map[string]*Team { + var teams map[string]*Team + json.NewDecoder(data).Decode(&teams) + return teams +} + +func TeamListToJson(t []*Team) string { + b, _ := json.Marshal(t) + return string(b) +} + +func TeamListFromJson(data io.Reader) []*Team { + var teams []*Team + 
json.NewDecoder(data).Decode(&teams) + return teams +} + +func (o *Team) Etag() string { + return Etag(o.Id, o.UpdateAt) +} + +func (o *Team) IsValid() *AppError { + + if len(o.Id) != 26 { + return NewAppError("Team.IsValid", "model.team.is_valid.id.app_error", nil, "", http.StatusBadRequest) + } + + if o.CreateAt == 0 { + return NewAppError("Team.IsValid", "model.team.is_valid.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if o.UpdateAt == 0 { + return NewAppError("Team.IsValid", "model.team.is_valid.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if len(o.Email) > TEAM_EMAIL_MAX_LENGTH { + return NewAppError("Team.IsValid", "model.team.is_valid.email.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if len(o.Email) > 0 && !IsValidEmail(o.Email) { + return NewAppError("Team.IsValid", "model.team.is_valid.email.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if utf8.RuneCountInString(o.DisplayName) == 0 || utf8.RuneCountInString(o.DisplayName) > TEAM_DISPLAY_NAME_MAX_RUNES { + return NewAppError("Team.IsValid", "model.team.is_valid.name.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if len(o.Name) > TEAM_NAME_MAX_LENGTH { + return NewAppError("Team.IsValid", "model.team.is_valid.url.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if len(o.Description) > TEAM_DESCRIPTION_MAX_LENGTH { + return NewAppError("Team.IsValid", "model.team.is_valid.description.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if IsReservedTeamName(o.Name) { + return NewAppError("Team.IsValid", "model.team.is_valid.reserved.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if !IsValidTeamName(o.Name) { + return NewAppError("Team.IsValid", "model.team.is_valid.characters.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if !(o.Type == TEAM_OPEN || o.Type == TEAM_INVITE) { + return NewAppError("Team.IsValid", "model.team.is_valid.type.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if len(o.CompanyName) > TEAM_COMPANY_NAME_MAX_LENGTH { + return NewAppError("Team.IsValid", "model.team.is_valid.company.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if len(o.AllowedDomains) > TEAM_ALLOWED_DOMAINS_MAX_LENGTH { + return NewAppError("Team.IsValid", "model.team.is_valid.domains.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + return nil +} + +func (o *Team) PreSave() { + if o.Id == "" { + o.Id = NewId() + } + + o.CreateAt = GetMillis() + o.UpdateAt = o.CreateAt + + if len(o.InviteId) == 0 { + o.InviteId = NewId() + } +} + +func (o *Team) PreUpdate() { + o.UpdateAt = GetMillis() +} + +func IsReservedTeamName(s string) bool { + s = strings.ToLower(s) + + for _, value := range reservedName { + if strings.Index(s, value) == 0 { + return true + } + } + + return false +} + +func IsValidTeamName(s string) bool { + + if !IsValidAlphaNum(s) { + return false + } + + if len(s) < TEAM_NAME_MIN_LENGTH { + return false + } + + return true +} + +var validTeamNameCharacter = regexp.MustCompile(`^[a-z0-9-]$`) + +func CleanTeamName(s string) string { + s = strings.ToLower(strings.Replace(s, " ", "-", -1)) + + for _, value := range reservedName { + if strings.Index(s, value) == 0 { + s = strings.Replace(s, value, "", -1) + } + } + + s = strings.TrimSpace(s) + + for _, c := range s { + char := fmt.Sprintf("%c", c) + if !validTeamNameCharacter.MatchString(char) { + s = strings.Replace(s, char, "", -1) + } + } + + s = strings.Trim(s, "-") + + if !IsValidTeamName(s) { + s = NewId() + } + + 
return s +} + +func (o *Team) Sanitize() { + o.Email = "" + o.AllowedDomains = "" +} + +func (o *Team) SanitizeForNotLoggedIn() { + o.Email = "" + o.AllowedDomains = "" + o.CompanyName = "" + if !o.AllowOpenInvite { + o.InviteId = "" + } +} + +func (t *Team) Patch(patch *TeamPatch) { + if patch.DisplayName != nil { + t.DisplayName = *patch.DisplayName + } + + if patch.Description != nil { + t.Description = *patch.Description + } + + if patch.CompanyName != nil { + t.CompanyName = *patch.CompanyName + } + + if patch.InviteId != nil { + t.InviteId = *patch.InviteId + } + + if patch.AllowOpenInvite != nil { + t.AllowOpenInvite = *patch.AllowOpenInvite + } +} + +func (t *TeamPatch) ToJson() string { + b, err := json.Marshal(t) + if err != nil { + return "" + } + + return string(b) +} + +func TeamPatchFromJson(data io.Reader) *TeamPatch { + decoder := json.NewDecoder(data) + var team TeamPatch + err := decoder.Decode(&team) + if err != nil { + return nil + } + + return &team +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/team_member.go b/vendor/github.com/mattermost/mattermost-server/model/team_member.go new file mode 100644 index 00000000..2fcd1e15 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/team_member.go @@ -0,0 +1,94 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" + "net/http" + "strings" +) + +type TeamMember struct { + TeamId string `json:"team_id"` + UserId string `json:"user_id"` + Roles string `json:"roles"` + DeleteAt int64 `json:"delete_at"` +} + +type TeamUnread struct { + TeamId string `json:"team_id"` + MsgCount int64 `json:"msg_count"` + MentionCount int64 `json:"mention_count"` +} + +func (o *TeamMember) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func (o *TeamUnread) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func TeamMemberFromJson(data io.Reader) *TeamMember { + var o *TeamMember + json.NewDecoder(data).Decode(&o) + return o +} + +func TeamUnreadFromJson(data io.Reader) *TeamUnread { + var o *TeamUnread + json.NewDecoder(data).Decode(&o) + return o +} + +func TeamMembersToJson(o []*TeamMember) string { + if b, err := json.Marshal(o); err != nil { + return "[]" + } else { + return string(b) + } +} + +func TeamMembersFromJson(data io.Reader) []*TeamMember { + var o []*TeamMember + json.NewDecoder(data).Decode(&o) + return o +} + +func TeamsUnreadToJson(o []*TeamUnread) string { + if b, err := json.Marshal(o); err != nil { + return "[]" + } else { + return string(b) + } +} + +func TeamsUnreadFromJson(data io.Reader) []*TeamUnread { + var o []*TeamUnread + json.NewDecoder(data).Decode(&o) + return o +} + +func (o *TeamMember) IsValid() *AppError { + + if len(o.TeamId) != 26 { + return NewAppError("TeamMember.IsValid", "model.team_member.is_valid.team_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.UserId) != 26 { + return NewAppError("TeamMember.IsValid", "model.team_member.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (o *TeamMember) PreUpdate() { +} + +func (o *TeamMember) GetRoles() []string { + return strings.Fields(o.Roles) +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/team_search.go b/vendor/github.com/mattermost/mattermost-server/model/team_search.go new file mode 100644 index 00000000..e0676022 --- /dev/null +++ 
b/vendor/github.com/mattermost/mattermost-server/model/team_search.go @@ -0,0 +1,35 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type TeamSearch struct { + Term string `json:"term"` +} + +// ToJson convert a TeamSearch to json string +func (c *TeamSearch) ToJson() string { + b, err := json.Marshal(c) + if err != nil { + return "" + } + + return string(b) +} + +// TeamSearchFromJson decodes the input and returns a TeamSearch +func TeamSearchFromJson(data io.Reader) *TeamSearch { + decoder := json.NewDecoder(data) + var cs TeamSearch + err := decoder.Decode(&cs) + if err == nil { + return &cs + } + + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/team_stats.go b/vendor/github.com/mattermost/mattermost-server/model/team_stats.go new file mode 100644 index 00000000..0d688b80 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/team_stats.go @@ -0,0 +1,26 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type TeamStats struct { + TeamId string `json:"team_id"` + TotalMemberCount int64 `json:"total_member_count"` + ActiveMemberCount int64 `json:"active_member_count"` +} + +func (o *TeamStats) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func TeamStatsFromJson(data io.Reader) *TeamStats { + var o *TeamStats + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/token.go b/vendor/github.com/mattermost/mattermost-server/model/token.go new file mode 100644 index 00000000..a4d10c7f --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/token.go @@ -0,0 +1,40 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import "net/http" + +const ( + TOKEN_SIZE = 64 + MAX_TOKEN_EXIPRY_TIME = 1000 * 60 * 60 * 24 // 24 hour + TOKEN_TYPE_OAUTH = "oauth" +) + +type Token struct { + Token string + CreateAt int64 + Type string + Extra string +} + +func NewToken(tokentype, extra string) *Token { + return &Token{ + Token: NewRandomString(TOKEN_SIZE), + CreateAt: GetMillis(), + Type: tokentype, + Extra: extra, + } +} + +func (t *Token) IsValid() *AppError { + if len(t.Token) != TOKEN_SIZE { + return NewAppError("Token.IsValid", "model.token.is_valid.size", nil, "", http.StatusInternalServerError) + } + + if t.CreateAt == 0 { + return NewAppError("Token.IsValid", "model.token.is_valid.expiry", nil, "", http.StatusInternalServerError) + } + + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/user.go b/vendor/github.com/mattermost/mattermost-server/model/user.go new file mode 100644 index 00000000..1e1d49f7 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/user.go @@ -0,0 +1,616 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
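A short sketch of the Token type above: NewToken generates a 64-character random secret, and IsValid only checks the token size and CreateAt; the 24-hour MAX_TOKEN_EXIPRY_TIME constant is presumably applied by the callers that look tokens up. The Extra payload below is an arbitrary example.

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	token := model.NewToken(model.TOKEN_TYPE_OAUTH, `{"client_id":"example"}`)

	if appErr := token.IsValid(); appErr != nil {
		fmt.Println("invalid token:", appErr.Error())
		return
	}

	// Staleness check as a caller would presumably apply it.
	stale := model.GetMillis()-token.CreateAt > model.MAX_TOKEN_EXIPRY_TIME
	fmt.Println("token length:", len(token.Token), "stale:", stale) // 64 false
}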
+ +package model + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "regexp" + "strings" + "unicode/utf8" + + "golang.org/x/crypto/bcrypt" +) + +const ( + ME = "me" + USER_NOTIFY_ALL = "all" + USER_NOTIFY_MENTION = "mention" + USER_NOTIFY_NONE = "none" + DESKTOP_NOTIFY_PROP = "desktop" + DESKTOP_SOUND_NOTIFY_PROP = "desktop_sound" + DESKTOP_DURATION_NOTIFY_PROP = "desktop_duration" + MARK_UNREAD_NOTIFY_PROP = "mark_unread" + PUSH_NOTIFY_PROP = "push" + PUSH_STATUS_NOTIFY_PROP = "push_status" + EMAIL_NOTIFY_PROP = "email" + CHANNEL_MENTIONS_NOTIFY_PROP = "channel" + COMMENTS_NOTIFY_PROP = "comments" + MENTION_KEYS_NOTIFY_PROP = "mention_keys" + COMMENTS_NOTIFY_NEVER = "never" + COMMENTS_NOTIFY_ROOT = "root" + COMMENTS_NOTIFY_ANY = "any" + + DEFAULT_LOCALE = "en" + USER_AUTH_SERVICE_EMAIL = "email" + + USER_EMAIL_MAX_LENGTH = 128 + USER_NICKNAME_MAX_RUNES = 64 + USER_POSITION_MAX_RUNES = 128 + USER_FIRST_NAME_MAX_RUNES = 64 + USER_LAST_NAME_MAX_RUNES = 64 + USER_AUTH_DATA_MAX_LENGTH = 128 + USER_NAME_MAX_LENGTH = 64 + USER_NAME_MIN_LENGTH = 1 + USER_PASSWORD_MAX_LENGTH = 72 +) + +type User struct { + Id string `json:"id"` + CreateAt int64 `json:"create_at,omitempty"` + UpdateAt int64 `json:"update_at,omitempty"` + DeleteAt int64 `json:"delete_at"` + Username string `json:"username"` + Password string `json:"password,omitempty"` + AuthData *string `json:"auth_data,omitempty"` + AuthService string `json:"auth_service"` + Email string `json:"email"` + EmailVerified bool `json:"email_verified,omitempty"` + Nickname string `json:"nickname"` + FirstName string `json:"first_name"` + LastName string `json:"last_name"` + Position string `json:"position"` + Roles string `json:"roles"` + AllowMarketing bool `json:"allow_marketing,omitempty"` + Props StringMap `json:"props,omitempty"` + NotifyProps StringMap `json:"notify_props,omitempty"` + LastPasswordUpdate int64 `json:"last_password_update,omitempty"` + LastPictureUpdate int64 `json:"last_picture_update,omitempty"` + FailedAttempts int `json:"failed_attempts,omitempty"` + Locale string `json:"locale"` + MfaActive bool `json:"mfa_active,omitempty"` + MfaSecret string `json:"mfa_secret,omitempty"` + LastActivityAt int64 `db:"-" json:"last_activity_at,omitempty"` +} + +type UserPatch struct { + Username *string `json:"username"` + Nickname *string `json:"nickname"` + FirstName *string `json:"first_name"` + LastName *string `json:"last_name"` + Position *string `json:"position"` + Email *string `json:"email"` + Props StringMap `json:"props,omitempty"` + NotifyProps StringMap `json:"notify_props,omitempty"` + Locale *string `json:"locale"` +} + +type UserAuth struct { + Password string `json:"password,omitempty"` + AuthData *string `json:"auth_data,omitempty"` + AuthService string `json:"auth_service,omitempty"` +} + +// IsValid validates the user and returns an error if it isn't configured +// correctly. 
+func (u *User) IsValid() *AppError { + + if len(u.Id) != 26 { + return InvalidUserError("id", "") + } + + if u.CreateAt == 0 { + return InvalidUserError("create_at", u.Id) + } + + if u.UpdateAt == 0 { + return InvalidUserError("update_at", u.Id) + } + + if !IsValidUsername(u.Username) { + return InvalidUserError("username", u.Id) + } + + if len(u.Email) > USER_EMAIL_MAX_LENGTH || len(u.Email) == 0 { + return InvalidUserError("email", u.Id) + } + + if utf8.RuneCountInString(u.Nickname) > USER_NICKNAME_MAX_RUNES { + return InvalidUserError("nickname", u.Id) + } + + if utf8.RuneCountInString(u.Position) > USER_POSITION_MAX_RUNES { + return InvalidUserError("position", u.Id) + } + + if utf8.RuneCountInString(u.FirstName) > USER_FIRST_NAME_MAX_RUNES { + return InvalidUserError("first_name", u.Id) + } + + if utf8.RuneCountInString(u.LastName) > USER_LAST_NAME_MAX_RUNES { + return InvalidUserError("last_name", u.Id) + } + + if u.AuthData != nil && len(*u.AuthData) > USER_AUTH_DATA_MAX_LENGTH { + return InvalidUserError("auth_data", u.Id) + } + + if u.AuthData != nil && len(*u.AuthData) > 0 && len(u.AuthService) == 0 { + return InvalidUserError("auth_data_type", u.Id) + } + + if len(u.Password) > 0 && u.AuthData != nil && len(*u.AuthData) > 0 { + return InvalidUserError("auth_data_pwd", u.Id) + } + + if len(u.Password) > USER_PASSWORD_MAX_LENGTH { + return InvalidUserError("password_limit", u.Id) + } + + return nil +} + +func InvalidUserError(fieldName string, userId string) *AppError { + id := fmt.Sprintf("model.user.is_valid.%s.app_error", fieldName) + details := "" + if userId != "" { + details = "user_id=" + userId + } + return NewAppError("User.IsValid", id, nil, details, http.StatusBadRequest) +} + +func NormalizeUsername(username string) string { + return strings.ToLower(username) +} + +func NormalizeEmail(email string) string { + return strings.ToLower(email) +} + +// PreSave will set the Id and Username if missing. It will also fill +// in the CreateAt, UpdateAt times. It will also hash the password. It should +// be run before saving the user to the db. +func (u *User) PreSave() { + if u.Id == "" { + u.Id = NewId() + } + + if u.Username == "" { + u.Username = NewId() + } + + if u.AuthData != nil && *u.AuthData == "" { + u.AuthData = nil + } + + u.Username = NormalizeUsername(u.Username) + u.Email = NormalizeEmail(u.Email) + + u.CreateAt = GetMillis() + u.UpdateAt = u.CreateAt + + u.LastPasswordUpdate = u.CreateAt + + u.MfaActive = false + + if u.Locale == "" { + u.Locale = DEFAULT_LOCALE + } + + if u.Props == nil { + u.Props = make(map[string]string) + } + + if u.NotifyProps == nil || len(u.NotifyProps) == 0 { + u.SetDefaultNotifications() + } + + if len(u.Password) > 0 { + u.Password = HashPassword(u.Password) + } +} + +// PreUpdate should be run before updating the user in the db. 
+func (u *User) PreUpdate() { + u.Username = NormalizeUsername(u.Username) + u.Email = NormalizeEmail(u.Email) + u.UpdateAt = GetMillis() + + if u.AuthData != nil && *u.AuthData == "" { + u.AuthData = nil + } + + if u.NotifyProps == nil || len(u.NotifyProps) == 0 { + u.SetDefaultNotifications() + } else if _, ok := u.NotifyProps["mention_keys"]; ok { + // Remove any blank mention keys + splitKeys := strings.Split(u.NotifyProps["mention_keys"], ",") + goodKeys := []string{} + for _, key := range splitKeys { + if len(key) > 0 { + goodKeys = append(goodKeys, strings.ToLower(key)) + } + } + u.NotifyProps["mention_keys"] = strings.Join(goodKeys, ",") + } +} + +func (u *User) SetDefaultNotifications() { + u.NotifyProps = make(map[string]string) + u.NotifyProps["email"] = "true" + u.NotifyProps["push"] = USER_NOTIFY_MENTION + u.NotifyProps["desktop"] = USER_NOTIFY_MENTION + u.NotifyProps["desktop_sound"] = "true" + u.NotifyProps["mention_keys"] = u.Username + ",@" + u.Username + u.NotifyProps["channel"] = "true" + u.NotifyProps["push_status"] = STATUS_AWAY + u.NotifyProps["comments"] = "never" + u.NotifyProps["first_name"] = "false" +} + +func (user *User) UpdateMentionKeysFromUsername(oldUsername string) { + nonUsernameKeys := []string{} + splitKeys := strings.Split(user.NotifyProps["mention_keys"], ",") + for _, key := range splitKeys { + if key != oldUsername && key != "@"+oldUsername { + nonUsernameKeys = append(nonUsernameKeys, key) + } + } + + user.NotifyProps["mention_keys"] = user.Username + ",@" + user.Username + if len(nonUsernameKeys) > 0 { + user.NotifyProps["mention_keys"] += "," + strings.Join(nonUsernameKeys, ",") + } +} + +func (u *User) Patch(patch *UserPatch) { + if patch.Username != nil { + u.Username = *patch.Username + } + + if patch.Nickname != nil { + u.Nickname = *patch.Nickname + } + + if patch.FirstName != nil { + u.FirstName = *patch.FirstName + } + + if patch.LastName != nil { + u.LastName = *patch.LastName + } + + if patch.Position != nil { + u.Position = *patch.Position + } + + if patch.Email != nil { + u.Email = *patch.Email + } + + if patch.Props != nil { + u.Props = patch.Props + } + + if patch.NotifyProps != nil { + u.NotifyProps = patch.NotifyProps + } + + if patch.Locale != nil { + u.Locale = *patch.Locale + } +} + +// ToJson convert a User to a json string +func (u *User) ToJson() string { + b, _ := json.Marshal(u) + return string(b) +} + +func (u *UserPatch) ToJson() string { + b, _ := json.Marshal(u) + return string(b) +} + +func (u *UserAuth) ToJson() string { + b, _ := json.Marshal(u) + return string(b) +} + +// Generate a valid strong etag so the browser can cache the results +func (u *User) Etag(showFullName, showEmail bool) string { + return Etag(u.Id, u.UpdateAt, showFullName, showEmail) +} + +// Remove any private data from the user object +func (u *User) Sanitize(options map[string]bool) { + u.Password = "" + u.AuthData = NewString("") + u.MfaSecret = "" + + if len(options) != 0 && !options["email"] { + u.Email = "" + } + if len(options) != 0 && !options["fullname"] { + u.FirstName = "" + u.LastName = "" + } + if len(options) != 0 && !options["passwordupdate"] { + u.LastPasswordUpdate = 0 + } + if len(options) != 0 && !options["authservice"] { + u.AuthService = "" + } +} + +func (u *User) ClearNonProfileFields() { + u.Password = "" + u.AuthData = NewString("") + u.MfaSecret = "" + u.EmailVerified = false + u.AllowMarketing = false + u.NotifyProps = StringMap{} + u.LastPasswordUpdate = 0 + u.FailedAttempts = 0 +} + +func (u *User) 
SanitizeProfile(options map[string]bool) { + u.ClearNonProfileFields() + + u.Sanitize(options) +} + +func (u *User) MakeNonNil() { + if u.Props == nil { + u.Props = make(map[string]string) + } + + if u.NotifyProps == nil { + u.NotifyProps = make(map[string]string) + } +} + +func (u *User) AddProp(key string, value string) { + u.MakeNonNil() + + u.Props[key] = value +} + +func (u *User) AddNotifyProp(key string, value string) { + u.MakeNonNil() + + u.NotifyProps[key] = value +} + +func (u *User) GetFullName() string { + if u.FirstName != "" && u.LastName != "" { + return u.FirstName + " " + u.LastName + } else if u.FirstName != "" { + return u.FirstName + } else if u.LastName != "" { + return u.LastName + } else { + return "" + } +} + +func (u *User) GetDisplayName(nameFormat string) string { + displayName := u.Username + + if nameFormat == SHOW_NICKNAME_FULLNAME { + if u.Nickname != "" { + displayName = u.Nickname + } else if fullName := u.GetFullName(); fullName != "" { + displayName = fullName + } + } else if nameFormat == SHOW_FULLNAME { + if fullName := u.GetFullName(); fullName != "" { + displayName = fullName + } + } + + return displayName +} + +func (u *User) GetRoles() []string { + return strings.Fields(u.Roles) +} + +func (u *User) GetRawRoles() string { + return u.Roles +} + +func IsValidUserRoles(userRoles string) bool { + + roles := strings.Fields(userRoles) + + for _, r := range roles { + if !isValidRole(r) { + return false + } + } + + // Exclude just the system_admin role explicitly to prevent mistakes + if len(roles) == 1 && roles[0] == "system_admin" { + return false + } + + return true +} + +func isValidRole(roleId string) bool { + _, ok := DefaultRoles[roleId] + return ok +} + +// Make sure you acually want to use this function. In context.go there are functions to check permissions +// This function should not be used to check permissions. +func (u *User) IsInRole(inRole string) bool { + return IsInRole(u.Roles, inRole) +} + +// Make sure you acually want to use this function. In context.go there are functions to check permissions +// This function should not be used to check permissions. 
+func IsInRole(userRoles string, inRole string) bool { + roles := strings.Split(userRoles, " ") + + for _, r := range roles { + if r == inRole { + return true + } + } + + return false +} + +func (u *User) IsSSOUser() bool { + return u.AuthService != "" && u.AuthService != USER_AUTH_SERVICE_EMAIL +} + +func (u *User) IsOAuthUser() bool { + return u.AuthService == USER_AUTH_SERVICE_GITLAB +} + +func (u *User) IsLDAPUser() bool { + return u.AuthService == USER_AUTH_SERVICE_LDAP +} + +func (u *User) IsSAMLUser() bool { + return u.AuthService == USER_AUTH_SERVICE_SAML +} + +// UserFromJson will decode the input and return a User +func UserFromJson(data io.Reader) *User { + var user *User + json.NewDecoder(data).Decode(&user) + return user +} + +func UserPatchFromJson(data io.Reader) *UserPatch { + var user *UserPatch + json.NewDecoder(data).Decode(&user) + return user +} + +func UserAuthFromJson(data io.Reader) *UserAuth { + var user *UserAuth + json.NewDecoder(data).Decode(&user) + return user +} + +func UserMapToJson(u map[string]*User) string { + b, _ := json.Marshal(u) + return string(b) +} + +func UserMapFromJson(data io.Reader) map[string]*User { + var users map[string]*User + json.NewDecoder(data).Decode(&users) + return users +} + +func UserListToJson(u []*User) string { + b, _ := json.Marshal(u) + return string(b) +} + +func UserListFromJson(data io.Reader) []*User { + var users []*User + json.NewDecoder(data).Decode(&users) + return users +} + +// HashPassword generates a hash using the bcrypt.GenerateFromPassword +func HashPassword(password string) string { + hash, err := bcrypt.GenerateFromPassword([]byte(password), 10) + if err != nil { + panic(err) + } + + return string(hash) +} + +// ComparePassword compares the hash +func ComparePassword(hash string, password string) bool { + + if len(password) == 0 || len(hash) == 0 { + return false + } + + err := bcrypt.CompareHashAndPassword([]byte(hash), []byte(password)) + return err == nil +} + +var validUsernameChars = regexp.MustCompile(`^[a-z0-9\.\-_]+$`) + +var restrictedUsernames = []string{ + "all", + "channel", + "matterbot", +} + +func IsValidUsername(s string) bool { + if len(s) < USER_NAME_MIN_LENGTH || len(s) > USER_NAME_MAX_LENGTH { + return false + } + + if !validUsernameChars.MatchString(s) { + return false + } + + for _, restrictedUsername := range restrictedUsernames { + if s == restrictedUsername { + return false + } + } + + return true +} + +func CleanUsername(s string) string { + s = NormalizeUsername(strings.Replace(s, " ", "-", -1)) + + for _, value := range reservedName { + if s == value { + s = strings.Replace(s, value, "", -1) + } + } + + s = strings.TrimSpace(s) + + for _, c := range s { + char := fmt.Sprintf("%c", c) + if !validUsernameChars.MatchString(char) { + s = strings.Replace(s, char, "-", -1) + } + } + + s = strings.Trim(s, "-") + + if !IsValidUsername(s) { + s = "a" + NewId() + } + + return s +} + +func IsValidUserNotifyLevel(notifyLevel string) bool { + return notifyLevel == CHANNEL_NOTIFY_ALL || + notifyLevel == CHANNEL_NOTIFY_MENTION || + notifyLevel == CHANNEL_NOTIFY_NONE +} + +func IsValidPushStatusNotifyLevel(notifyLevel string) bool { + return notifyLevel == STATUS_ONLINE || + notifyLevel == STATUS_AWAY || + notifyLevel == STATUS_OFFLINE +} + +func IsValidCommentsNotifyLevel(notifyLevel string) bool { + return notifyLevel == COMMENTS_NOTIFY_ANY || + notifyLevel == COMMENTS_NOTIFY_ROOT || + notifyLevel == COMMENTS_NOTIFY_NEVER +} diff --git 
a/vendor/github.com/mattermost/mattermost-server/model/user_access_token.go b/vendor/github.com/mattermost/mattermost-server/model/user_access_token.go new file mode 100644 index 00000000..bffd9fcb --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/user_access_token.go @@ -0,0 +1,65 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" + "net/http" +) + +type UserAccessToken struct { + Id string `json:"id"` + Token string `json:"token,omitempty"` + UserId string `json:"user_id"` + Description string `json:"description"` + IsActive bool `json:"is_active"` +} + +func (t *UserAccessToken) IsValid() *AppError { + if len(t.Id) != 26 { + return NewAppError("UserAccessToken.IsValid", "model.user_access_token.is_valid.id.app_error", nil, "", http.StatusBadRequest) + } + + if len(t.Token) != 26 { + return NewAppError("UserAccessToken.IsValid", "model.user_access_token.is_valid.token.app_error", nil, "", http.StatusBadRequest) + } + + if len(t.UserId) != 26 { + return NewAppError("UserAccessToken.IsValid", "model.user_access_token.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(t.Description) > 255 { + return NewAppError("UserAccessToken.IsValid", "model.user_access_token.is_valid.description.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (t *UserAccessToken) PreSave() { + t.Id = NewId() + t.IsActive = true +} + +func (t *UserAccessToken) ToJson() string { + b, _ := json.Marshal(t) + return string(b) +} + +func UserAccessTokenFromJson(data io.Reader) *UserAccessToken { + var t *UserAccessToken + json.NewDecoder(data).Decode(&t) + return t +} + +func UserAccessTokenListToJson(t []*UserAccessToken) string { + b, _ := json.Marshal(t) + return string(b) +} + +func UserAccessTokenListFromJson(data io.Reader) []*UserAccessToken { + var t []*UserAccessToken + json.NewDecoder(data).Decode(&t) + return t +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/user_access_token_search.go b/vendor/github.com/mattermost/mattermost-server/model/user_access_token_search.go new file mode 100644 index 00000000..1b0146ed --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/user_access_token_search.go @@ -0,0 +1,35 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type UserAccessTokenSearch struct { + Term string `json:"term"` +} + +// ToJson convert a UserAccessTokenSearch to json string +func (c *UserAccessTokenSearch) ToJson() string { + b, err := json.Marshal(c) + if err != nil { + return "" + } + + return string(b) +} + +// UserAccessTokenSearchJson decodes the input and returns a UserAccessTokenSearch +func UserAccessTokenSearchFromJson(data io.Reader) *UserAccessTokenSearch { + decoder := json.NewDecoder(data) + var cs UserAccessTokenSearch + err := decoder.Decode(&cs) + if err == nil { + return &cs + } + + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/user_autocomplete.go b/vendor/github.com/mattermost/mattermost-server/model/user_autocomplete.go new file mode 100644 index 00000000..b5edb45b --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/user_autocomplete.go @@ -0,0 +1,61 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
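A sketch of the UserAccessToken lifecycle implied above: PreSave assigns the Id and activates the token but does not fill Token itself, so the example sets it to a fresh 26-character id the way calling code presumably does; IsValid then checks the id, token, user id and description length.

package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	uat := &model.UserAccessToken{
		UserId:      model.NewId(),
		Description: "token for the CI bot",
	}

	uat.PreSave()             // sets Id and IsActive, but not Token
	uat.Token = model.NewId() // the secret itself is filled in by the caller

	if appErr := uat.IsValid(); appErr != nil {
		fmt.Println("invalid:", appErr.Error())
		return
	}
	fmt.Println(uat.ToJson())
}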
+ +package model + +import ( + "encoding/json" + "io" +) + +type UserAutocompleteInChannel struct { + InChannel []*User `json:"in_channel"` + OutOfChannel []*User `json:"out_of_channel"` +} + +type UserAutocompleteInTeam struct { + InTeam []*User `json:"in_team"` +} + +type UserAutocomplete struct { + Users []*User `json:"users"` + OutOfChannel []*User `json:"out_of_channel,omitempty"` +} + +func (o *UserAutocomplete) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func UserAutocompleteFromJson(data io.Reader) *UserAutocomplete { + decoder := json.NewDecoder(data) + autocomplete := new(UserAutocomplete) + err := decoder.Decode(&autocomplete) + if err == nil { + return autocomplete + } else { + return nil + } +} + +func (o *UserAutocompleteInChannel) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func UserAutocompleteInChannelFromJson(data io.Reader) *UserAutocompleteInChannel { + var o *UserAutocompleteInChannel + json.NewDecoder(data).Decode(&o) + return o +} + +func (o *UserAutocompleteInTeam) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func UserAutocompleteInTeamFromJson(data io.Reader) *UserAutocompleteInTeam { + var o *UserAutocompleteInTeam + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/user_search.go b/vendor/github.com/mattermost/mattermost-server/model/user_search.go new file mode 100644 index 00000000..94596bdc --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/user_search.go @@ -0,0 +1,32 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type UserSearch struct { + Term string `json:"term"` + TeamId string `json:"team_id"` + NotInTeamId string `json:"not_in_team_id"` + InChannelId string `json:"in_channel_id"` + NotInChannelId string `json:"not_in_channel_id"` + AllowInactive bool `json:"allow_inactive"` + WithoutTeam bool `json:"without_team"` +} + +// ToJson convert a User to a json string +func (u *UserSearch) ToJson() string { + b, _ := json.Marshal(u) + return string(b) +} + +// UserSearchFromJson will decode the input and return a User +func UserSearchFromJson(data io.Reader) *UserSearch { + var us *UserSearch + json.NewDecoder(data).Decode(&us) + return us +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/utils.go b/vendor/github.com/mattermost/mattermost-server/model/utils.go new file mode 100644 index 00000000..331a1aaa --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/utils.go @@ -0,0 +1,486 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
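The UserSearch type above follows the same ToJson/FromJson round-trip convention used throughout these model files; a quick sketch with placeholder ids:

package main

import (
	"fmt"
	"strings"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	search := &model.UserSearch{
		Term:          "ali",
		TeamId:        model.NewId(),
		AllowInactive: true,
	}

	// Serialize as a request body would be, then decode it back.
	decoded := model.UserSearchFromJson(strings.NewReader(search.ToJson()))
	fmt.Println(decoded.Term, decoded.AllowInactive) // ali true
}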
+ +package model + +import ( + "bytes" + "crypto/rand" + "encoding/base32" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/mail" + "net/url" + "regexp" + "strconv" + "strings" + "time" + "unicode" + + goi18n "github.com/nicksnyder/go-i18n/i18n" + "github.com/pborman/uuid" +) + +const ( + LOWERCASE_LETTERS = "abcdefghijklmnopqrstuvwxyz" + UPPERCASE_LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + NUMBERS = "0123456789" + SYMBOLS = " !\"\\#$%&'()*+,-./:;<=>?@[]^_`|~" +) + +type StringInterface map[string]interface{} +type StringMap map[string]string +type StringArray []string + +var translateFunc goi18n.TranslateFunc = nil + +func AppErrorInit(t goi18n.TranslateFunc) { + translateFunc = t +} + +type AppError struct { + Id string `json:"id"` + Message string `json:"message"` // Message to be display to the end user without debugging information + DetailedError string `json:"detailed_error"` // Internal error string to help the developer + RequestId string `json:"request_id,omitempty"` // The RequestId that's also set in the header + StatusCode int `json:"status_code,omitempty"` // The http status code + Where string `json:"-"` // The function where it happened in the form of Struct.Func + IsOAuth bool `json:"is_oauth,omitempty"` // Whether the error is OAuth specific + params map[string]interface{} +} + +func (er *AppError) Error() string { + return er.Where + ": " + er.Message + ", " + er.DetailedError +} + +func (er *AppError) Translate(T goi18n.TranslateFunc) { + if T == nil { + er.Message = er.Id + return + } + + if er.params == nil { + er.Message = T(er.Id) + } else { + er.Message = T(er.Id, er.params) + } +} + +func (er *AppError) SystemMessage(T goi18n.TranslateFunc) string { + if er.params == nil { + return T(er.Id) + } else { + return T(er.Id, er.params) + } +} + +func (er *AppError) ToJson() string { + b, _ := json.Marshal(er) + return string(b) +} + +// AppErrorFromJson will decode the input and return an AppError +func AppErrorFromJson(data io.Reader) *AppError { + str := "" + bytes, rerr := ioutil.ReadAll(data) + if rerr != nil { + str = rerr.Error() + } else { + str = string(bytes) + } + + decoder := json.NewDecoder(strings.NewReader(str)) + var er AppError + err := decoder.Decode(&er) + if err == nil { + return &er + } else { + return NewAppError("AppErrorFromJson", "model.utils.decode_json.app_error", nil, "body: "+str, http.StatusInternalServerError) + } +} + +func NewAppError(where string, id string, params map[string]interface{}, details string, status int) *AppError { + ap := &AppError{} + ap.Id = id + ap.params = params + ap.Message = id + ap.Where = where + ap.DetailedError = details + ap.StatusCode = status + ap.IsOAuth = false + ap.Translate(translateFunc) + return ap +} + +var encoding = base32.NewEncoding("ybndrfg8ejkmcpqxot1uwisza345h769") + +// NewId is a globally unique identifier. It is a [A-Z0-9] string 26 +// characters long. It is a UUID version 4 Guid that is zbased32 encoded +// with the padding stripped off. 
+func NewId() string { + var b bytes.Buffer + encoder := base32.NewEncoder(encoding, &b) + encoder.Write(uuid.NewRandom()) + encoder.Close() + b.Truncate(26) // removes the '==' padding + return b.String() +} + +func NewRandomString(length int) string { + var b bytes.Buffer + str := make([]byte, length+8) + rand.Read(str) + encoder := base32.NewEncoder(encoding, &b) + encoder.Write(str) + encoder.Close() + b.Truncate(length) // removes the '==' padding + return b.String() +} + +// GetMillis is a convience method to get milliseconds since epoch. +func GetMillis() int64 { + return time.Now().UnixNano() / int64(time.Millisecond) +} + +// MapToJson converts a map to a json string +func MapToJson(objmap map[string]string) string { + b, _ := json.Marshal(objmap) + return string(b) +} + +// MapToJson converts a map to a json string +func MapBoolToJson(objmap map[string]bool) string { + b, _ := json.Marshal(objmap) + return string(b) +} + +// MapFromJson will decode the key/value pair map +func MapFromJson(data io.Reader) map[string]string { + decoder := json.NewDecoder(data) + + var objmap map[string]string + if err := decoder.Decode(&objmap); err != nil { + return make(map[string]string) + } else { + return objmap + } +} + +// MapFromJson will decode the key/value pair map +func MapBoolFromJson(data io.Reader) map[string]bool { + decoder := json.NewDecoder(data) + + var objmap map[string]bool + if err := decoder.Decode(&objmap); err != nil { + return make(map[string]bool) + } else { + return objmap + } +} + +func ArrayToJson(objmap []string) string { + b, _ := json.Marshal(objmap) + return string(b) +} + +func ArrayFromJson(data io.Reader) []string { + decoder := json.NewDecoder(data) + + var objmap []string + if err := decoder.Decode(&objmap); err != nil { + return make([]string, 0) + } else { + return objmap + } +} + +func ArrayFromInterface(data interface{}) []string { + stringArray := []string{} + + dataArray, ok := data.([]interface{}) + if !ok { + return stringArray + } + + for _, v := range dataArray { + if str, ok := v.(string); ok { + stringArray = append(stringArray, str) + } + } + + return stringArray +} + +func StringInterfaceToJson(objmap map[string]interface{}) string { + b, _ := json.Marshal(objmap) + return string(b) +} + +func StringInterfaceFromJson(data io.Reader) map[string]interface{} { + decoder := json.NewDecoder(data) + + var objmap map[string]interface{} + if err := decoder.Decode(&objmap); err != nil { + return make(map[string]interface{}) + } else { + return objmap + } +} + +func StringToJson(s string) string { + b, _ := json.Marshal(s) + return string(b) +} + +func StringFromJson(data io.Reader) string { + decoder := json.NewDecoder(data) + + var s string + if err := decoder.Decode(&s); err != nil { + return "" + } else { + return s + } +} + +func GetServerIpAddress() string { + if addrs, err := net.InterfaceAddrs(); err != nil { + return "" + } else { + for _, addr := range addrs { + + if ip, ok := addr.(*net.IPNet); ok && !ip.IP.IsLoopback() { + if ip.IP.To4() != nil { + return ip.IP.String() + } + } + } + } + + return "" +} + +func IsLower(s string) bool { + return strings.ToLower(s) == s +} + +func IsValidEmail(email string) bool { + + if !IsLower(email) { + return false + } + + if _, err := mail.ParseAddress(email); err == nil { + return true + } + + return false +} + +var reservedName = []string{ + "signup", + "login", + "admin", + "channel", + "post", + "api", + "oauth", +} + +func IsValidChannelIdentifier(s string) bool { + + if 
!IsValidAlphaNumHyphenUnderscore(s, true) { + return false + } + + if len(s) < CHANNEL_NAME_MIN_LENGTH { + return false + } + + return true +} + +func IsValidAlphaNum(s string) bool { + validAlphaNum := regexp.MustCompile(`^[a-z0-9]+([a-z\-0-9]+|(__)?)[a-z0-9]+$`) + + return validAlphaNum.MatchString(s) +} + +func IsValidAlphaNumHyphenUnderscore(s string, withFormat bool) bool { + if withFormat { + validAlphaNumHyphenUnderscore := regexp.MustCompile(`^[a-z0-9]+([a-z\-\_0-9]+|(__)?)[a-z0-9]+$`) + return validAlphaNumHyphenUnderscore.MatchString(s) + } + + validSimpleAlphaNumHyphenUnderscore := regexp.MustCompile(`^[a-zA-Z0-9\-_]+$`) + return validSimpleAlphaNumHyphenUnderscore.MatchString(s) +} + +func Etag(parts ...interface{}) string { + + etag := CurrentVersion + + for _, part := range parts { + etag += fmt.Sprintf(".%v", part) + } + + return etag +} + +var validHashtag = regexp.MustCompile(`^(#\pL[\pL\d\-_.]*[\pL\d])$`) +var puncStart = regexp.MustCompile(`^[^\pL\d\s#]+`) +var hashtagStart = regexp.MustCompile(`^#{2,}`) +var puncEnd = regexp.MustCompile(`[^\pL\d\s]+$`) + +func ParseHashtags(text string) (string, string) { + words := strings.Fields(text) + + hashtagString := "" + plainString := "" + for _, word := range words { + // trim off surrounding punctuation + word = puncStart.ReplaceAllString(word, "") + word = puncEnd.ReplaceAllString(word, "") + + // and remove extra pound #s + word = hashtagStart.ReplaceAllString(word, "#") + + if validHashtag.MatchString(word) { + hashtagString += " " + word + } else { + plainString += " " + word + } + } + + if len(hashtagString) > 1000 { + hashtagString = hashtagString[:999] + lastSpace := strings.LastIndex(hashtagString, " ") + if lastSpace > -1 { + hashtagString = hashtagString[:lastSpace] + } else { + hashtagString = "" + } + } + + return strings.TrimSpace(hashtagString), strings.TrimSpace(plainString) +} + +func IsFileExtImage(ext string) bool { + ext = strings.ToLower(ext) + for _, imgExt := range IMAGE_EXTENSIONS { + if ext == imgExt { + return true + } + } + return false +} + +func GetImageMimeType(ext string) string { + ext = strings.ToLower(ext) + if len(IMAGE_MIME_TYPES[ext]) == 0 { + return "image" + } else { + return IMAGE_MIME_TYPES[ext] + } +} + +func ClearMentionTags(post string) string { + post = strings.Replace(post, "<mention>", "", -1) + post = strings.Replace(post, "</mention>", "", -1) + return post +} + +var UrlRegex = regexp.MustCompile(`^((?:[a-z]+:\/\/)?(?:(?:[a-z0-9\-]+\.)+(?:[a-z]{2}|aero|arpa|biz|com|coop|edu|gov|info|int|jobs|mil|museum|name|nato|net|org|pro|travel|local|internal))(:[0-9]{1,5})?(?:\/[a-z0-9_\-\.~]+)*(\/([a-z0-9_\-\.]*)(?:\?[a-z0-9+_~\-\.%=&]*)?)?(?:#[a-zA-Z0-9!$&'()*+.=-_~:@/?]*)?)(?:\s+|$)$`) +var PartialUrlRegex = regexp.MustCompile(`/([A-Za-z0-9]{26})/([A-Za-z0-9]{26})/((?:[A-Za-z0-9]{26})?.+(?:\.[A-Za-z0-9]{3,})?)`) + +func IsValidHttpUrl(rawUrl string) bool { + if strings.Index(rawUrl, "http://") != 0 && strings.Index(rawUrl, "https://") != 0 { + return false + } + + if _, err := url.ParseRequestURI(rawUrl); err != nil { + return false + } + + return true +} + +func IsValidHttpsUrl(rawUrl string) bool { + if strings.Index(rawUrl, "https://") != 0 { + return false + } + + if _, err := url.ParseRequestURI(rawUrl); err != nil { + return false + } + + return true +} + +func IsValidTurnOrStunServer(rawUri string) bool { + if strings.Index(rawUri, "turn:") != 0 && strings.Index(rawUri, "stun:") != 0 { + return false + } + + if _, err := url.ParseRequestURI(rawUri); err != nil { + return false + } 
+ + return true +} + +func IsSafeLink(link *string) bool { + if link != nil { + if IsValidHttpUrl(*link) { + return true + } else if strings.HasPrefix(*link, "/") { + return true + } else { + return false + } + } + + return true +} + +func IsValidWebsocketUrl(rawUrl string) bool { + if strings.Index(rawUrl, "ws://") != 0 && strings.Index(rawUrl, "wss://") != 0 { + return false + } + + if _, err := url.ParseRequestURI(rawUrl); err != nil { + return false + } + + return true +} + +func IsValidTrueOrFalseString(value string) bool { + return value == "true" || value == "false" +} + +func IsValidNumberString(value string) bool { + if _, err := strconv.Atoi(value); err != nil { + return false + } + + return true +} + +func IsValidId(value string) bool { + if len(value) != 26 { + return false + } + + for _, r := range value { + if !unicode.IsLetter(r) && !unicode.IsNumber(r) { + return false + } + } + + return true +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/version.go b/vendor/github.com/mattermost/mattermost-server/model/version.go new file mode 100644 index 00000000..1bd7baec --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/version.go @@ -0,0 +1,148 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "fmt" + "strconv" + "strings" +) + +// This is a list of all the current viersions including any patches. +// It should be maitained in chronological order with most current +// release at the front of the list. +var versions = []string{ + "4.7.0", + "4.6.0", + "4.5.0", + "4.4.0", + "4.3.0", + "4.2.0", + "4.1.0", + "4.0.0", + "3.10.0", + "3.9.0", + "3.8.0", + "3.7.0", + "3.6.0", + "3.5.0", + "3.4.0", + "3.3.0", + "3.2.0", + "3.1.0", + "3.0.0", + "2.2.0", + "2.1.0", + "2.0.0", + "1.4.0", + "1.3.0", + "1.2.1", + "1.2.0", + "1.1.0", + "1.0.0", + "0.7.1", + "0.7.0", + "0.6.0", + "0.5.0", +} + +var CurrentVersion string = versions[0] +var BuildNumber string +var BuildDate string +var BuildHash string +var BuildHashEnterprise string +var BuildEnterpriseReady string +var versionsWithoutHotFixes []string + +func init() { + versionsWithoutHotFixes = make([]string, 0, len(versions)) + seen := make(map[string]string) + + for _, version := range versions { + maj, min, _ := SplitVersion(version) + verStr := fmt.Sprintf("%v.%v.0", maj, min) + + if seen[verStr] == "" { + versionsWithoutHotFixes = append(versionsWithoutHotFixes, verStr) + seen[verStr] = verStr + } + } +} + +func SplitVersion(version string) (int64, int64, int64) { + parts := strings.Split(version, ".") + + major := int64(0) + minor := int64(0) + patch := int64(0) + + if len(parts) > 0 { + major, _ = strconv.ParseInt(parts[0], 10, 64) + } + + if len(parts) > 1 { + minor, _ = strconv.ParseInt(parts[1], 10, 64) + } + + if len(parts) > 2 { + patch, _ = strconv.ParseInt(parts[2], 10, 64) + } + + return major, minor, patch +} + +func GetPreviousVersion(version string) string { + verMajor, verMinor, _ := SplitVersion(version) + verStr := fmt.Sprintf("%v.%v.0", verMajor, verMinor) + + for index, v := range versionsWithoutHotFixes { + if v == verStr && len(versionsWithoutHotFixes) > index+1 { + return versionsWithoutHotFixes[index+1] + } + } + + return "" +} + +func IsOfficalBuild() bool { + return BuildNumber != "_BUILD_NUMBER_" +} + +func IsCurrentVersion(versionToCheck string) bool { + currentMajor, currentMinor, _ := SplitVersion(CurrentVersion) + toCheckMajor, toCheckMinor, _ := SplitVersion(versionToCheck) 
+ + if toCheckMajor == currentMajor && toCheckMinor == currentMinor { + return true + } else { + return false + } +} + +func IsPreviousVersionsSupported(versionToCheck string) bool { + toCheckMajor, toCheckMinor, _ := SplitVersion(versionToCheck) + versionToCheckStr := fmt.Sprintf("%v.%v.0", toCheckMajor, toCheckMinor) + + // Current Supported + if versionsWithoutHotFixes[0] == versionToCheckStr { + return true + } + + // Current - 1 Supported + if versionsWithoutHotFixes[1] == versionToCheckStr { + return true + } + + // Current - 2 Supported + if versionsWithoutHotFixes[2] == versionToCheckStr { + return true + } + + // Current - 3 Supported + if versionsWithoutHotFixes[3] == versionToCheckStr { + return true + } + + return false +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/webrtc.go b/vendor/github.com/mattermost/mattermost-server/model/webrtc.go new file mode 100644 index 00000000..59797a5b --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/webrtc.go @@ -0,0 +1,39 @@ +// Copyright (c) 2017 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type WebrtcInfoResponse struct { + Token string `json:"token"` + GatewayUrl string `json:"gateway_url"` + StunUri string `json:"stun_uri,omitempty"` + TurnUri string `json:"turn_uri,omitempty"` + TurnPassword string `json:"turn_password,omitempty"` + TurnUsername string `json:"turn_username,omitempty"` +} + +type GatewayResponse struct { + Status string `json:"janus"` +} + +func GatewayResponseFromJson(data io.Reader) *GatewayResponse { + var o *GatewayResponse + json.NewDecoder(data).Decode(&o) + return o +} + +func (o *WebrtcInfoResponse) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func WebrtcInfoResponseFromJson(data io.Reader) *WebrtcInfoResponse { + var o *WebrtcInfoResponse + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/websocket_client.go b/vendor/github.com/mattermost/mattermost-server/model/websocket_client.go new file mode 100644 index 00000000..e5c44dde --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/websocket_client.go @@ -0,0 +1,167 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "net/http" + + "github.com/gorilla/websocket" +) + +const ( + SOCKET_MAX_MESSAGE_SIZE_KB = 8 * 1024 // 8KB +) + +type WebSocketClient struct { + Url string // The location of the server like "ws://localhost:8065" + ApiUrl string // The api location of the server like "ws://localhost:8065/api/v3" + ConnectUrl string // The websocket URL to connect to like "ws://localhost:8065/api/v3/path/to/websocket" + Conn *websocket.Conn // The WebSocket connection + AuthToken string // The token used to open the WebSocket + Sequence int64 // The ever-incrementing sequence attached to each WebSocket action + EventChannel chan *WebSocketEvent + ResponseChannel chan *WebSocketResponse + ListenError *AppError +} + +// NewWebSocketClient constructs a new WebSocket client with convienence +// methods for talking to the server. 
+func NewWebSocketClient(url, authToken string) (*WebSocketClient, *AppError) { + conn, _, err := websocket.DefaultDialer.Dial(url+API_URL_SUFFIX_V3+"/users/websocket", nil) + if err != nil { + return nil, NewAppError("NewWebSocketClient", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError) + } + + client := &WebSocketClient{ + url, + url + API_URL_SUFFIX_V3, + url + API_URL_SUFFIX_V3 + "/users/websocket", + conn, + authToken, + 1, + make(chan *WebSocketEvent, 100), + make(chan *WebSocketResponse, 100), + nil, + } + + client.SendMessage(WEBSOCKET_AUTHENTICATION_CHALLENGE, map[string]interface{}{"token": authToken}) + + return client, nil +} + +// NewWebSocketClient4 constructs a new WebSocket client with convienence +// methods for talking to the server. Uses the v4 endpoint. +func NewWebSocketClient4(url, authToken string) (*WebSocketClient, *AppError) { + conn, _, err := websocket.DefaultDialer.Dial(url+API_URL_SUFFIX+"/websocket", nil) + if err != nil { + return nil, NewAppError("NewWebSocketClient4", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError) + } + + client := &WebSocketClient{ + url, + url + API_URL_SUFFIX, + url + API_URL_SUFFIX + "/websocket", + conn, + authToken, + 1, + make(chan *WebSocketEvent, 100), + make(chan *WebSocketResponse, 100), + nil, + } + + client.SendMessage(WEBSOCKET_AUTHENTICATION_CHALLENGE, map[string]interface{}{"token": authToken}) + + return client, nil +} + +func (wsc *WebSocketClient) Connect() *AppError { + var err error + wsc.Conn, _, err = websocket.DefaultDialer.Dial(wsc.ConnectUrl, nil) + if err != nil { + return NewAppError("Connect", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError) + } + + wsc.EventChannel = make(chan *WebSocketEvent, 100) + wsc.ResponseChannel = make(chan *WebSocketResponse, 100) + + wsc.SendMessage(WEBSOCKET_AUTHENTICATION_CHALLENGE, map[string]interface{}{"token": wsc.AuthToken}) + + return nil +} + +func (wsc *WebSocketClient) Close() { + wsc.Conn.Close() +} + +func (wsc *WebSocketClient) Listen() { + go func() { + defer func() { + wsc.Conn.Close() + close(wsc.EventChannel) + close(wsc.ResponseChannel) + }() + + for { + var rawMsg json.RawMessage + var err error + if _, rawMsg, err = wsc.Conn.ReadMessage(); err != nil { + if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseNoStatusReceived) { + wsc.ListenError = NewAppError("NewWebSocketClient", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError) + } + + return + } + + var event WebSocketEvent + if err := json.Unmarshal(rawMsg, &event); err == nil && event.IsValid() { + wsc.EventChannel <- &event + continue + } + + var response WebSocketResponse + if err := json.Unmarshal(rawMsg, &response); err == nil && response.IsValid() { + wsc.ResponseChannel <- &response + continue + } + + } + }() +} + +func (wsc *WebSocketClient) SendMessage(action string, data map[string]interface{}) { + req := &WebSocketRequest{} + req.Seq = wsc.Sequence + req.Action = action + req.Data = data + + wsc.Sequence++ + + wsc.Conn.WriteJSON(req) +} + +// UserTyping will push a user_typing event out to all connected users +// who are in the specified channel +func (wsc *WebSocketClient) UserTyping(channelId, parentId string) { + data := map[string]interface{}{ + "channel_id": channelId, + "parent_id": parentId, + } + + wsc.SendMessage("user_typing", data) +} + +// GetStatuses will return a 
map of string statuses using user id as the key +func (wsc *WebSocketClient) GetStatuses() { + wsc.SendMessage("get_statuses", nil) +} + +// GetStatusesByIds will fetch certain user statuses based on ids and return +// a map of string statuses using user id as the key +func (wsc *WebSocketClient) GetStatusesByIds(userIds []string) { + data := map[string]interface{}{ + "user_ids": userIds, + } + wsc.SendMessage("get_statuses_by_ids", data) +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/websocket_message.go b/vendor/github.com/mattermost/mattermost-server/model/websocket_message.go new file mode 100644 index 00000000..0256e400 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/websocket_message.go @@ -0,0 +1,132 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +const ( + WEBSOCKET_EVENT_TYPING = "typing" + WEBSOCKET_EVENT_POSTED = "posted" + WEBSOCKET_EVENT_POST_EDITED = "post_edited" + WEBSOCKET_EVENT_POST_DELETED = "post_deleted" + WEBSOCKET_EVENT_CHANNEL_DELETED = "channel_deleted" + WEBSOCKET_EVENT_CHANNEL_CREATED = "channel_created" + WEBSOCKET_EVENT_CHANNEL_UPDATED = "channel_updated" + WEBSOCKET_EVENT_DIRECT_ADDED = "direct_added" + WEBSOCKET_EVENT_GROUP_ADDED = "group_added" + WEBSOCKET_EVENT_NEW_USER = "new_user" + WEBSOCKET_EVENT_ADDED_TO_TEAM = "added_to_team" + WEBSOCKET_EVENT_LEAVE_TEAM = "leave_team" + WEBSOCKET_EVENT_UPDATE_TEAM = "update_team" + WEBSOCKET_EVENT_DELETE_TEAM = "delete_team" + WEBSOCKET_EVENT_USER_ADDED = "user_added" + WEBSOCKET_EVENT_USER_UPDATED = "user_updated" + WEBSOCKET_EVENT_USER_ROLE_UPDATED = "user_role_updated" + WEBSOCKET_EVENT_MEMBERROLE_UPDATED = "memberrole_updated" + WEBSOCKET_EVENT_USER_REMOVED = "user_removed" + WEBSOCKET_EVENT_PREFERENCE_CHANGED = "preference_changed" + WEBSOCKET_EVENT_PREFERENCES_CHANGED = "preferences_changed" + WEBSOCKET_EVENT_PREFERENCES_DELETED = "preferences_deleted" + WEBSOCKET_EVENT_EPHEMERAL_MESSAGE = "ephemeral_message" + WEBSOCKET_EVENT_STATUS_CHANGE = "status_change" + WEBSOCKET_EVENT_HELLO = "hello" + WEBSOCKET_EVENT_WEBRTC = "webrtc" + WEBSOCKET_AUTHENTICATION_CHALLENGE = "authentication_challenge" + WEBSOCKET_EVENT_REACTION_ADDED = "reaction_added" + WEBSOCKET_EVENT_REACTION_REMOVED = "reaction_removed" + WEBSOCKET_EVENT_RESPONSE = "response" + WEBSOCKET_EVENT_EMOJI_ADDED = "emoji_added" + WEBSOCKET_EVENT_CHANNEL_VIEWED = "channel_viewed" + WEBSOCKET_EVENT_PLUGIN_ACTIVATED = "plugin_activated" // EXPERIMENTAL - SUBJECT TO CHANGE + WEBSOCKET_EVENT_PLUGIN_DEACTIVATED = "plugin_deactivated" // EXPERIMENTAL - SUBJECT TO CHANGE +) + +type WebSocketMessage interface { + ToJson() string + IsValid() bool + EventType() string +} + +type WebsocketBroadcast struct { + OmitUsers map[string]bool `json:"omit_users"` // broadcast is omitted for users listed here + UserId string `json:"user_id"` // broadcast only occurs for this user + ChannelId string `json:"channel_id"` // broadcast only occurs for users in this channel + TeamId string `json:"team_id"` // broadcast only occurs for users in this team +} + +type WebSocketEvent struct { + Event string `json:"event"` + Data map[string]interface{} `json:"data"` + Broadcast *WebsocketBroadcast `json:"broadcast"` + Sequence int64 `json:"seq"` +} + +func (m *WebSocketEvent) Add(key string, value interface{}) { + m.Data[key] = value +} + +func NewWebSocketEvent(event, teamId, channelId, userId string, omitUsers 
map[string]bool) *WebSocketEvent { + return &WebSocketEvent{Event: event, Data: make(map[string]interface{}), + Broadcast: &WebsocketBroadcast{TeamId: teamId, ChannelId: channelId, UserId: userId, OmitUsers: omitUsers}} +} + +func (o *WebSocketEvent) IsValid() bool { + return o.Event != "" +} + +func (o *WebSocketEvent) EventType() string { + return o.Event +} + +func (o *WebSocketEvent) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func WebSocketEventFromJson(data io.Reader) *WebSocketEvent { + var o *WebSocketEvent + json.NewDecoder(data).Decode(&o) + return o +} + +type WebSocketResponse struct { + Status string `json:"status"` + SeqReply int64 `json:"seq_reply,omitempty"` + Data map[string]interface{} `json:"data,omitempty"` + Error *AppError `json:"error,omitempty"` +} + +func (m *WebSocketResponse) Add(key string, value interface{}) { + m.Data[key] = value +} + +func NewWebSocketResponse(status string, seqReply int64, data map[string]interface{}) *WebSocketResponse { + return &WebSocketResponse{Status: status, SeqReply: seqReply, Data: data} +} + +func NewWebSocketError(seqReply int64, err *AppError) *WebSocketResponse { + return &WebSocketResponse{Status: STATUS_FAIL, SeqReply: seqReply, Error: err} +} + +func (o *WebSocketResponse) IsValid() bool { + return o.Status != "" +} + +func (o *WebSocketResponse) EventType() string { + return WEBSOCKET_EVENT_RESPONSE +} + +func (o *WebSocketResponse) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func WebSocketResponseFromJson(data io.Reader) *WebSocketResponse { + var o *WebSocketResponse + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/mattermost-server/model/websocket_request.go b/vendor/github.com/mattermost/mattermost-server/model/websocket_request.go new file mode 100644 index 00000000..4da626e2 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/model/websocket_request.go @@ -0,0 +1,34 @@ +// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
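As a small usage sketch of the event helpers above (illustrative only, not part of the vendored source): building a posted event, serializing it, and reading it back with the FromJson helper. The channel ID and post payload are placeholders.

package main

import (
	"fmt"
	"strings"

	"github.com/mattermost/mattermost-server/model"
)

func main() {
	// "channel-id" is a placeholder; team and user IDs are left empty so the
	// broadcast is scoped to a single channel.
	event := model.NewWebSocketEvent(model.WEBSOCKET_EVENT_POSTED, "", "channel-id", "", nil)
	event.Add("post", `{"message":"hello"}`)

	encoded := event.ToJson()
	fmt.Println(encoded)

	// Round-trip through the decoder defined above.
	decoded := model.WebSocketEventFromJson(strings.NewReader(encoded))
	fmt.Println(decoded.IsValid(), decoded.Event)
}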
+ +package model + +import ( + "encoding/json" + "io" + + goi18n "github.com/nicksnyder/go-i18n/i18n" +) + +type WebSocketRequest struct { + // Client-provided fields + Seq int64 `json:"seq"` + Action string `json:"action"` + Data map[string]interface{} `json:"data"` + + // Server-provided fields + Session Session `json:"-"` + T goi18n.TranslateFunc `json:"-"` + Locale string `json:"-"` +} + +func (o *WebSocketRequest) ToJson() string { + b, _ := json.Marshal(o) + return string(b) +} + +func WebSocketRequestFromJson(data io.Reader) *WebSocketRequest { + var o *WebSocketRequest + json.NewDecoder(data).Decode(&o) + return o +} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/LICENSE.txt index ead98cf0..ead98cf0 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/LICENSE.txt +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/LICENSE.txt diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/alecthomas/log4go/config.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/config.go index 577c3eb2..577c3eb2 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/alecthomas/log4go/config.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/config.go diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/ConsoleLogWriter_Manual.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/ConsoleLogWriter_Manual.go new file mode 100644 index 00000000..698dd332 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/ConsoleLogWriter_Manual.go @@ -0,0 +1,14 @@ +package main + +import ( + "time" +) + +import l4g "code.google.com/p/log4go" + +func main() { + log := l4g.NewLogger() + defer log.Close() + log.AddFilter("stdout", l4g.DEBUG, l4g.NewConsoleLogWriter()) + log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02")) +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/FileLogWriter_Manual.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/FileLogWriter_Manual.go new file mode 100644 index 00000000..efd596aa --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/FileLogWriter_Manual.go @@ -0,0 +1,57 @@ +package main + +import ( + "bufio" + "fmt" + "io" + "os" + "time" +) + +import l4g "code.google.com/p/log4go" + +const ( + filename = "flw.log" +) + +func main() { + // Get a new logger instance + log := l4g.NewLogger() + + // Create a default logger that is logging messages of FINE or higher + log.AddFilter("file", l4g.FINE, l4g.NewFileLogWriter(filename, false)) + log.Close() + + /* Can also specify manually via the following: (these are the defaults) */ + flw := l4g.NewFileLogWriter(filename, false) + flw.SetFormat("[%D %T] [%L] (%S) %M") + flw.SetRotate(false) + flw.SetRotateSize(0) + flw.SetRotateLines(0) + flw.SetRotateDaily(false) + log.AddFilter("file", l4g.FINE, flw) + + // Log some experimental messages + log.Finest("Everything is created now (notice that I will not be printing to the file)") + log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02")) + 
log.Critical("Time to close out!") + + // Close the log + log.Close() + + // Print what was logged to the file (yes, I know I'm skipping error checking) + fd, _ := os.Open(filename) + in := bufio.NewReader(fd) + fmt.Print("Messages logged to file were: (line numbers not included)\n") + for lineno := 1; ; lineno++ { + line, err := in.ReadString('\n') + if err == io.EOF { + break + } + fmt.Printf("%3d:\t%s", lineno, line) + } + fd.Close() + + // Remove the file so it's not lying around + os.Remove(filename) +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/SimpleNetLogServer.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/SimpleNetLogServer.go new file mode 100644 index 00000000..83c80ad1 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/SimpleNetLogServer.go @@ -0,0 +1,42 @@ +package main + +import ( + "flag" + "fmt" + "net" + "os" +) + +var ( + port = flag.String("p", "12124", "Port number to listen on") +) + +func e(err error) { + if err != nil { + fmt.Printf("Erroring out: %s\n", err) + os.Exit(1) + } +} + +func main() { + flag.Parse() + + // Bind to the port + bind, err := net.ResolveUDPAddr("0.0.0.0:" + *port) + e(err) + + // Create listener + listener, err := net.ListenUDP("udp", bind) + e(err) + + fmt.Printf("Listening to port %s...\n", *port) + for { + // read into a new buffer + buffer := make([]byte, 1024) + _, _, err := listener.ReadFrom(buffer) + e(err) + + // log to standard output + fmt.Println(string(buffer)) + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/SocketLogWriter_Manual.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/SocketLogWriter_Manual.go new file mode 100644 index 00000000..400b698c --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/SocketLogWriter_Manual.go @@ -0,0 +1,18 @@ +package main + +import ( + "time" +) + +import l4g "code.google.com/p/log4go" + +func main() { + log := l4g.NewLogger() + log.AddFilter("network", l4g.FINEST, l4g.NewSocketLogWriter("udp", "192.168.1.255:12124")) + + // Run `nc -u -l -p 12124` or similar before you run this to see the following message + log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02")) + + // This makes sure the output stream buffer is written + log.Close() +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/XMLConfigurationExample.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/XMLConfigurationExample.go new file mode 100644 index 00000000..164c2add --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/examples/XMLConfigurationExample.go @@ -0,0 +1,13 @@ +package main + +import l4g "code.google.com/p/log4go" + +func main() { + // Load the configuration (isn't this easy?) + l4g.LoadConfiguration("example.xml") + + // And now we're ready! + l4g.Finest("This will only go to those of you really cool UDP kids! If you change enabled=true.") + l4g.Debug("Oh no! 
%d + %d = %d!", 2, 2, 2+2) + l4g.Info("About that time, eh chaps?") +} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/alecthomas/log4go/filelog.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/filelog.go index ee0ab0c0..9bc4df15 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/alecthomas/log4go/filelog.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/filelog.go @@ -47,7 +47,6 @@ func (w *FileLogWriter) LogWrite(rec *LogRecord) { func (w *FileLogWriter) Close() { close(w.rec) - w.file.Sync() } // NewFileLogWriter creates a new LogWriter which writes to the given file and @@ -79,6 +78,7 @@ func NewFileLogWriter(fname string, rotate bool) *FileLogWriter { defer func() { if w.file != nil { fmt.Fprint(w.file, FormatLogRecord(w.trailer, &LogRecord{Created: time.Now()})) + w.file.Sync() w.file.Close() } }() diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/alecthomas/log4go/log4go.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/log4go.go index 822e890c..822e890c 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/alecthomas/log4go/log4go.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/log4go.go diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/alecthomas/log4go/pattlog.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/pattlog.go index 82b4e36b..98632e4d 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/alecthomas/log4go/pattlog.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/pattlog.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "strings" + "sync" ) const ( @@ -22,6 +23,7 @@ type formatCacheType struct { } var formatCache = &formatCacheType{} +var mutex sync.Mutex // Known format codes: // %T - Time (15:04:05 MST) @@ -44,6 +46,7 @@ func FormatLogRecord(format string, rec *LogRecord) string { out := bytes.NewBuffer(make([]byte, 0, 64)) secs := rec.Created.UnixNano() / 1e9 + mutex.Lock() cache := *formatCache if cache.LastUpdateSeconds != secs { month, day, year := rec.Created.Month(), rec.Created.Day(), rec.Created.Year() @@ -59,6 +62,7 @@ func FormatLogRecord(format string, rec *LogRecord) string { cache = *updated formatCache = updated } + mutex.Unlock() // Split the string into pieces by % signs pieces := bytes.Split([]byte(format), []byte{'%'}) diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/alecthomas/log4go/socklog.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/socklog.go index 1d224a99..1d224a99 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/alecthomas/log4go/socklog.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/socklog.go diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/alecthomas/log4go/termlog.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/termlog.go index 8a941e26..8a941e26 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/alecthomas/log4go/termlog.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/termlog.go diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/alecthomas/log4go/wrapper.go 
b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/wrapper.go index 2ae222b0..2ae222b0 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/alecthomas/log4go/wrapper.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go/wrapper.go diff --git a/vendor/github.com/mattermost/platform/vendor/golang.org/x/crypto/bcrypt/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/LICENSE.txt index ead98cf0..ead98cf0 100644 --- a/vendor/github.com/mattermost/platform/vendor/golang.org/x/crypto/bcrypt/LICENSE.txt +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/LICENSE.txt diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/add.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/add.go new file mode 100644 index 00000000..0e5f6cdb --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/add.go @@ -0,0 +1,113 @@ +// +// https://tools.ietf.org/html/rfc4511 +// +// AddRequest ::= [APPLICATION 8] SEQUENCE { +// entry LDAPDN, +// attributes AttributeList } +// +// AttributeList ::= SEQUENCE OF attribute Attribute + +package ldap + +import ( + "errors" + "log" + + "gopkg.in/asn1-ber.v1" +) + +// Attribute represents an LDAP attribute +type Attribute struct { + // Type is the name of the LDAP attribute + Type string + // Vals are the LDAP attribute values + Vals []string +} + +func (a *Attribute) encode() *ber.Packet { + seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attribute") + seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.Type, "Type")) + set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue") + for _, value := range a.Vals { + set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals")) + } + seq.AppendChild(set) + return seq +} + +// AddRequest represents an LDAP AddRequest operation +type AddRequest struct { + // DN identifies the entry being added + DN string + // Attributes list the attributes of the new entry + Attributes []Attribute +} + +func (a AddRequest) encode() *ber.Packet { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationAddRequest, nil, "Add Request") + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.DN, "DN")) + attributes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes") + for _, attribute := range a.Attributes { + attributes.AppendChild(attribute.encode()) + } + request.AppendChild(attributes) + return request +} + +// Attribute adds an attribute with the given type and values +func (a *AddRequest) Attribute(attrType string, attrVals []string) { + a.Attributes = append(a.Attributes, Attribute{Type: attrType, Vals: attrVals}) +} + +// NewAddRequest returns an AddRequest for the given DN, with no attributes +func NewAddRequest(dn string) *AddRequest { + return &AddRequest{ + DN: dn, + } + +} + +// Add performs the given AddRequest +func (l *Conn) Add(addRequest *AddRequest) error { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + packet.AppendChild(addRequest.encode()) + + 
l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return err + } + ber.PrintPacket(packet) + } + + if packet.Children[1].Tag == ApplicationAddResponse { + resultCode, resultDescription := getLDAPResultCode(packet) + if resultCode != 0 { + return NewError(resultCode, errors.New(resultDescription)) + } + } else { + log.Printf("Unexpected Response: %d", packet.Children[1].Tag) + } + + l.Debug.Printf("%d: returning", msgCtx.id) + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/atomic_value.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/atomic_value.go new file mode 100644 index 00000000..bccf7573 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/atomic_value.go @@ -0,0 +1,13 @@ +// +build go1.4 + +package ldap + +import ( + "sync/atomic" +) + +// For compilers that support it, we just use the underlying sync/atomic.Value +// type. +type atomicValue struct { + atomic.Value +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/atomic_value_go13.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/atomic_value_go13.go new file mode 100644 index 00000000..04920bb2 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/atomic_value_go13.go @@ -0,0 +1,28 @@ +// +build !go1.4 + +package ldap + +import ( + "sync" +) + +// This is a helper type that emulates the use of the "sync/atomic.Value" +// struct that's available in Go 1.4 and up. +type atomicValue struct { + value interface{} + lock sync.RWMutex +} + +func (av *atomicValue) Store(val interface{}) { + av.lock.Lock() + av.value = val + av.lock.Unlock() +} + +func (av *atomicValue) Load() interface{} { + av.lock.RLock() + ret := av.value + av.lock.RUnlock() + + return ret +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/bind.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/bind.go new file mode 100644 index 00000000..26b3cc72 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/bind.go @@ -0,0 +1,143 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
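A hedged sketch of the Add flow defined above, assuming a reachable directory server; the host, bind DN, password, and entry DN are all placeholders, and the import uses the library's upstream path rather than this vendored copy.

package main

import (
	"log"

	"github.com/go-ldap/ldap"
)

func main() {
	// Placeholder address and credentials.
	conn, err := ldap.Dial("tcp", "ldap.example.com:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.Bind("cn=admin,dc=example,dc=com", "admin-password"); err != nil {
		log.Fatal(err)
	}

	// Build the entry with the Attribute helper, then send the AddRequest.
	add := ldap.NewAddRequest("cn=jdoe,ou=people,dc=example,dc=com")
	add.Attribute("objectClass", []string{"inetOrgPerson"})
	add.Attribute("cn", []string{"jdoe"})
	add.Attribute("sn", []string{"Doe"})

	if err := conn.Add(add); err != nil {
		log.Fatal(err)
	}
}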
+ +package ldap + +import ( + "errors" + + "gopkg.in/asn1-ber.v1" +) + +// SimpleBindRequest represents a username/password bind operation +type SimpleBindRequest struct { + // Username is the name of the Directory object that the client wishes to bind as + Username string + // Password is the credentials to bind with + Password string + // Controls are optional controls to send with the bind request + Controls []Control +} + +// SimpleBindResult contains the response from the server +type SimpleBindResult struct { + Controls []Control +} + +// NewSimpleBindRequest returns a bind request +func NewSimpleBindRequest(username string, password string, controls []Control) *SimpleBindRequest { + return &SimpleBindRequest{ + Username: username, + Password: password, + Controls: controls, + } +} + +func (bindRequest *SimpleBindRequest) encode() *ber.Packet { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request") + request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version")) + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, bindRequest.Username, "User Name")) + request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, bindRequest.Password, "Password")) + + request.AppendChild(encodeControls(bindRequest.Controls)) + + return request +} + +// SimpleBind performs the simple bind operation defined in the given request +func (l *Conn) SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error) { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + encodedBindRequest := simpleBindRequest.encode() + packet.AppendChild(encodedBindRequest) + + if l.Debug { + ber.PrintPacket(packet) + } + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return nil, err + } + defer l.finishMessage(msgCtx) + + packetResponse, ok := <-msgCtx.responses + if !ok { + return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return nil, err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return nil, err + } + ber.PrintPacket(packet) + } + + result := &SimpleBindResult{ + Controls: make([]Control, 0), + } + + if len(packet.Children) == 3 { + for _, child := range packet.Children[2].Children { + result.Controls = append(result.Controls, DecodeControl(child)) + } + } + + resultCode, resultDescription := getLDAPResultCode(packet) + if resultCode != 0 { + return result, NewError(resultCode, errors.New(resultDescription)) + } + + return result, nil +} + +// Bind performs a bind with the given username and password +func (l *Conn) Bind(username, password string) error { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + bindRequest := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request") + bindRequest.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version")) + bindRequest.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, 
ber.TagOctetString, username, "User Name")) + bindRequest.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, password, "Password")) + packet.AppendChild(bindRequest) + + if l.Debug { + ber.PrintPacket(packet) + } + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + packetResponse, ok := <-msgCtx.responses + if !ok { + return NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return err + } + ber.PrintPacket(packet) + } + + resultCode, resultDescription := getLDAPResultCode(packet) + if resultCode != 0 { + return NewError(resultCode, errors.New(resultDescription)) + } + + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/client.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/client.go new file mode 100644 index 00000000..055b27b5 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/client.go @@ -0,0 +1,27 @@ +package ldap + +import ( + "crypto/tls" + "time" +) + +// Client knows how to interact with an LDAP server +type Client interface { + Start() + StartTLS(config *tls.Config) error + Close() + SetTimeout(time.Duration) + + Bind(username, password string) error + SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error) + + Add(addRequest *AddRequest) error + Del(delRequest *DelRequest) error + Modify(modifyRequest *ModifyRequest) error + + Compare(dn, attribute, value string) (bool, error) + PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) + + Search(searchRequest *SearchRequest) (*SearchResult, error) + SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/compare.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/compare.go new file mode 100644 index 00000000..cc6d2af5 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/compare.go @@ -0,0 +1,85 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// File contains Compare functionality +// +// https://tools.ietf.org/html/rfc4511 +// +// CompareRequest ::= [APPLICATION 14] SEQUENCE { +// entry LDAPDN, +// ava AttributeValueAssertion } +// +// AttributeValueAssertion ::= SEQUENCE { +// attributeDesc AttributeDescription, +// assertionValue AssertionValue } +// +// AttributeDescription ::= LDAPString +// -- Constrained to <attributedescription> +// -- [RFC4512] +// +// AttributeValue ::= OCTET STRING +// + +package ldap + +import ( + "errors" + "fmt" + + "gopkg.in/asn1-ber.v1" +) + +// Compare checks to see if the attribute of the dn matches value. Returns true if it does otherwise +// false with any error that occurs if any. 
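To make the semantics concrete, a short sketch of calling Compare against a directory entry; the server address, entry DN, and attribute value are placeholders, and the upstream import path is assumed rather than this vendored copy.

package main

import (
	"fmt"
	"log"

	"github.com/go-ldap/ldap"
)

func main() {
	conn, err := ldap.Dial("tcp", "ldap.example.com:389") // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Compare does not modify the entry; it only asks the server whether the
	// attribute currently holds the given value.
	matched, err := conn.Compare("cn=jdoe,ou=people,dc=example,dc=com", "mail", "jdoe@example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("mail matches:", matched)
}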
+func (l *Conn) Compare(dn, attribute, value string) (bool, error) { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationCompareRequest, nil, "Compare Request") + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, dn, "DN")) + + ava := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "AttributeValueAssertion") + ava.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "AttributeDesc")) + ava.AppendChild(ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagOctetString, value, "AssertionValue")) + request.AppendChild(ava) + packet.AppendChild(request) + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return false, err + } + defer l.finishMessage(msgCtx) + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return false, NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return false, err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return false, err + } + ber.PrintPacket(packet) + } + + if packet.Children[1].Tag == ApplicationCompareResponse { + resultCode, resultDescription := getLDAPResultCode(packet) + if resultCode == LDAPResultCompareTrue { + return true, nil + } else if resultCode == LDAPResultCompareFalse { + return false, nil + } else { + return false, NewError(resultCode, errors.New(resultDescription)) + } + } + return false, fmt.Errorf("Unexpected Response: %d", packet.Children[1].Tag) +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/conn.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/conn.go new file mode 100644 index 00000000..eb28eb47 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/conn.go @@ -0,0 +1,470 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
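Before the connection internals, a hedged sketch of the SimpleBind request/result pair defined earlier; the bind DN and password are placeholders, and any controls the server attaches to the bind response are simply counted.

package main

import (
	"fmt"
	"log"

	"github.com/go-ldap/ldap"
)

func main() {
	conn, err := ldap.Dial("tcp", "ldap.example.com:389") // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// SimpleBind surfaces response controls, for example password-policy
	// information, alongside the bind result.
	req := ldap.NewSimpleBindRequest("cn=reader,dc=example,dc=com", "reader-password", nil)
	result, err := conn.SimpleBind(req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("bound; %d response control(s)\n", len(result.Controls))
}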
+ +package ldap + +import ( + "crypto/tls" + "errors" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "gopkg.in/asn1-ber.v1" +) + +const ( + // MessageQuit causes the processMessages loop to exit + MessageQuit = 0 + // MessageRequest sends a request to the server + MessageRequest = 1 + // MessageResponse receives a response from the server + MessageResponse = 2 + // MessageFinish indicates the client considers a particular message ID to be finished + MessageFinish = 3 + // MessageTimeout indicates the client-specified timeout for a particular message ID has been reached + MessageTimeout = 4 +) + +// PacketResponse contains the packet or error encountered reading a response +type PacketResponse struct { + // Packet is the packet read from the server + Packet *ber.Packet + // Error is an error encountered while reading + Error error +} + +// ReadPacket returns the packet or an error +func (pr *PacketResponse) ReadPacket() (*ber.Packet, error) { + if (pr == nil) || (pr.Packet == nil && pr.Error == nil) { + return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve response")) + } + return pr.Packet, pr.Error +} + +type messageContext struct { + id int64 + // close(done) should only be called from finishMessage() + done chan struct{} + // close(responses) should only be called from processMessages(), and only sent to from sendResponse() + responses chan *PacketResponse +} + +// sendResponse should only be called within the processMessages() loop which +// is also responsible for closing the responses channel. +func (msgCtx *messageContext) sendResponse(packet *PacketResponse) { + select { + case msgCtx.responses <- packet: + // Successfully sent packet to message handler. + case <-msgCtx.done: + // The request handler is done and will not receive more + // packets. + } +} + +type messagePacket struct { + Op int + MessageID int64 + Packet *ber.Packet + Context *messageContext +} + +type sendMessageFlags uint + +const ( + startTLS sendMessageFlags = 1 << iota +) + +// Conn represents an LDAP Connection +type Conn struct { + conn net.Conn + isTLS bool + closing uint32 + closeErr atomicValue + isStartingTLS bool + Debug debugging + chanConfirm chan struct{} + messageContexts map[int64]*messageContext + chanMessage chan *messagePacket + chanMessageID chan int64 + wgClose sync.WaitGroup + outstandingRequests uint + messageMutex sync.Mutex + requestTimeout int64 +} + +var _ Client = &Conn{} + +// DefaultTimeout is a package-level variable that sets the timeout value +// used for the Dial and DialTLS methods. +// +// WARNING: since this is a package-level variable, setting this value from +// multiple places will probably result in undesired behaviour. +var DefaultTimeout = 60 * time.Second + +// Dial connects to the given address on the given network using net.Dial +// and then returns a new Conn for the connection. +func Dial(network, addr string) (*Conn, error) { + c, err := net.DialTimeout(network, addr, DefaultTimeout) + if err != nil { + return nil, NewError(ErrorNetwork, err) + } + conn := NewConn(c, false) + conn.Start() + return conn, nil +} + +// DialTLS connects to the given address on the given network using tls.Dial +// and then returns a new Conn for the connection. 
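A sketch of the dial-time and per-request timeout knobs, assuming an LDAPS server at a placeholder address; ServerName would need to match the real certificate, and the upstream import path is assumed.

package main

import (
	"crypto/tls"
	"log"
	"time"

	"github.com/go-ldap/ldap"
)

func main() {
	// DefaultTimeout is package-level state, so it is best set once at startup.
	ldap.DefaultTimeout = 30 * time.Second

	conn, err := ldap.DialTLS("tcp", "ldap.example.com:636", &tls.Config{
		ServerName: "ldap.example.com", // placeholder; must match the server certificate
	})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Per-request deadline, enforced by the MessageTimeout path in processMessages.
	conn.SetTimeout(10 * time.Second)
}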
+func DialTLS(network, addr string, config *tls.Config) (*Conn, error) { + dc, err := net.DialTimeout(network, addr, DefaultTimeout) + if err != nil { + return nil, NewError(ErrorNetwork, err) + } + c := tls.Client(dc, config) + err = c.Handshake() + if err != nil { + // Handshake error, close the established connection before we return an error + dc.Close() + return nil, NewError(ErrorNetwork, err) + } + conn := NewConn(c, true) + conn.Start() + return conn, nil +} + +// NewConn returns a new Conn using conn for network I/O. +func NewConn(conn net.Conn, isTLS bool) *Conn { + return &Conn{ + conn: conn, + chanConfirm: make(chan struct{}), + chanMessageID: make(chan int64), + chanMessage: make(chan *messagePacket, 10), + messageContexts: map[int64]*messageContext{}, + requestTimeout: 0, + isTLS: isTLS, + } +} + +// Start initializes goroutines to read responses and process messages +func (l *Conn) Start() { + go l.reader() + go l.processMessages() + l.wgClose.Add(1) +} + +// isClosing returns whether or not we're currently closing. +func (l *Conn) isClosing() bool { + return atomic.LoadUint32(&l.closing) == 1 +} + +// setClosing sets the closing value to true +func (l *Conn) setClosing() bool { + return atomic.CompareAndSwapUint32(&l.closing, 0, 1) +} + +// Close closes the connection. +func (l *Conn) Close() { + l.messageMutex.Lock() + defer l.messageMutex.Unlock() + + if l.setClosing() { + l.Debug.Printf("Sending quit message and waiting for confirmation") + l.chanMessage <- &messagePacket{Op: MessageQuit} + <-l.chanConfirm + close(l.chanMessage) + + l.Debug.Printf("Closing network connection") + if err := l.conn.Close(); err != nil { + log.Println(err) + } + + l.wgClose.Done() + } + l.wgClose.Wait() +} + +// SetTimeout sets the time after a request is sent that a MessageTimeout triggers +func (l *Conn) SetTimeout(timeout time.Duration) { + if timeout > 0 { + atomic.StoreInt64(&l.requestTimeout, int64(timeout)) + } +} + +// Returns the next available messageID +func (l *Conn) nextMessageID() int64 { + if messageID, ok := <-l.chanMessageID; ok { + return messageID + } + return 0 +} + +// StartTLS sends the command to start a TLS session and then creates a new TLS Client +func (l *Conn) StartTLS(config *tls.Config) error { + if l.isTLS { + return NewError(ErrorNetwork, errors.New("ldap: already encrypted")) + } + + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Start TLS") + request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, "1.3.6.1.4.1.1466.20037", "TLS Extended Command")) + packet.AppendChild(request) + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessageWithFlags(packet, startTLS) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + + packetResponse, ok := <-msgCtx.responses + if !ok { + return NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + l.Close() + return err + } + ber.PrintPacket(packet) + } + + if resultCode, message := getLDAPResultCode(packet); resultCode == 
LDAPResultSuccess { + conn := tls.Client(l.conn, config) + + if err := conn.Handshake(); err != nil { + l.Close() + return NewError(ErrorNetwork, fmt.Errorf("TLS handshake failed (%v)", err)) + } + + l.isTLS = true + l.conn = conn + } else { + return NewError(resultCode, fmt.Errorf("ldap: cannot StartTLS (%s)", message)) + } + go l.reader() + + return nil +} + +func (l *Conn) sendMessage(packet *ber.Packet) (*messageContext, error) { + return l.sendMessageWithFlags(packet, 0) +} + +func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags) (*messageContext, error) { + if l.isClosing() { + return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed")) + } + l.messageMutex.Lock() + l.Debug.Printf("flags&startTLS = %d", flags&startTLS) + if l.isStartingTLS { + l.messageMutex.Unlock() + return nil, NewError(ErrorNetwork, errors.New("ldap: connection is in startls phase")) + } + if flags&startTLS != 0 { + if l.outstandingRequests != 0 { + l.messageMutex.Unlock() + return nil, NewError(ErrorNetwork, errors.New("ldap: cannot StartTLS with outstanding requests")) + } + l.isStartingTLS = true + } + l.outstandingRequests++ + + l.messageMutex.Unlock() + + responses := make(chan *PacketResponse) + messageID := packet.Children[0].Value.(int64) + message := &messagePacket{ + Op: MessageRequest, + MessageID: messageID, + Packet: packet, + Context: &messageContext{ + id: messageID, + done: make(chan struct{}), + responses: responses, + }, + } + l.sendProcessMessage(message) + return message.Context, nil +} + +func (l *Conn) finishMessage(msgCtx *messageContext) { + close(msgCtx.done) + + if l.isClosing() { + return + } + + l.messageMutex.Lock() + l.outstandingRequests-- + if l.isStartingTLS { + l.isStartingTLS = false + } + l.messageMutex.Unlock() + + message := &messagePacket{ + Op: MessageFinish, + MessageID: msgCtx.id, + } + l.sendProcessMessage(message) +} + +func (l *Conn) sendProcessMessage(message *messagePacket) bool { + l.messageMutex.Lock() + defer l.messageMutex.Unlock() + if l.isClosing() { + return false + } + l.chanMessage <- message + return true +} + +func (l *Conn) processMessages() { + defer func() { + if err := recover(); err != nil { + log.Printf("ldap: recovered panic in processMessages: %v", err) + } + for messageID, msgCtx := range l.messageContexts { + // If we are closing due to an error, inform anyone who + // is waiting about the error. + if l.isClosing() && l.closeErr.Load() != nil { + msgCtx.sendResponse(&PacketResponse{Error: l.closeErr.Load().(error)}) + } + l.Debug.Printf("Closing channel for MessageID %d", messageID) + close(msgCtx.responses) + delete(l.messageContexts, messageID) + } + close(l.chanMessageID) + close(l.chanConfirm) + }() + + var messageID int64 = 1 + for { + select { + case l.chanMessageID <- messageID: + messageID++ + case message := <-l.chanMessage: + switch message.Op { + case MessageQuit: + l.Debug.Printf("Shutting down - quit message received") + return + case MessageRequest: + // Add to message list and write to network + l.Debug.Printf("Sending message %d", message.MessageID) + + buf := message.Packet.Bytes() + _, err := l.conn.Write(buf) + if err != nil { + l.Debug.Printf("Error Sending Message: %s", err.Error()) + message.Context.sendResponse(&PacketResponse{Error: fmt.Errorf("unable to send request: %s", err)}) + close(message.Context.responses) + break + } + + // Only add to messageContexts if we were able to + // successfully write the message. 
+ l.messageContexts[message.MessageID] = message.Context + + // Add timeout if defined + requestTimeout := time.Duration(atomic.LoadInt64(&l.requestTimeout)) + if requestTimeout > 0 { + go func() { + defer func() { + if err := recover(); err != nil { + log.Printf("ldap: recovered panic in RequestTimeout: %v", err) + } + }() + time.Sleep(requestTimeout) + timeoutMessage := &messagePacket{ + Op: MessageTimeout, + MessageID: message.MessageID, + } + l.sendProcessMessage(timeoutMessage) + }() + } + case MessageResponse: + l.Debug.Printf("Receiving message %d", message.MessageID) + if msgCtx, ok := l.messageContexts[message.MessageID]; ok { + msgCtx.sendResponse(&PacketResponse{message.Packet, nil}) + } else { + log.Printf("Received unexpected message %d, %v", message.MessageID, l.isClosing()) + ber.PrintPacket(message.Packet) + } + case MessageTimeout: + // Handle the timeout by closing the channel + // All reads will return immediately + if msgCtx, ok := l.messageContexts[message.MessageID]; ok { + l.Debug.Printf("Receiving message timeout for %d", message.MessageID) + msgCtx.sendResponse(&PacketResponse{message.Packet, errors.New("ldap: connection timed out")}) + delete(l.messageContexts, message.MessageID) + close(msgCtx.responses) + } + case MessageFinish: + l.Debug.Printf("Finished message %d", message.MessageID) + if msgCtx, ok := l.messageContexts[message.MessageID]; ok { + delete(l.messageContexts, message.MessageID) + close(msgCtx.responses) + } + } + } + } +} + +func (l *Conn) reader() { + cleanstop := false + defer func() { + if err := recover(); err != nil { + log.Printf("ldap: recovered panic in reader: %v", err) + } + if !cleanstop { + l.Close() + } + }() + + for { + if cleanstop { + l.Debug.Printf("reader clean stopping (without closing the connection)") + return + } + packet, err := ber.ReadPacket(l.conn) + if err != nil { + // A read error is expected here if we are closing the connection... + if !l.isClosing() { + l.closeErr.Store(fmt.Errorf("unable to read LDAP response packet: %s", err)) + l.Debug.Printf("reader error: %s", err.Error()) + } + return + } + addLDAPDescriptions(packet) + if len(packet.Children) == 0 { + l.Debug.Printf("Received bad ldap packet") + continue + } + l.messageMutex.Lock() + if l.isStartingTLS { + cleanstop = true + } + l.messageMutex.Unlock() + message := &messagePacket{ + Op: MessageResponse, + MessageID: packet.Children[0].Value.(int64), + Packet: packet, + } + if !l.sendProcessMessage(message) { + return + } + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/control.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/control.go new file mode 100644 index 00000000..342f325c --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/control.go @@ -0,0 +1,420 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
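As an offline sketch of the control helpers defined below (no directory connection required, not part of the vendored source): building a paging control, round-tripping it through Encode and DecodeControl, and locating it in a control list by OID.

package main

import (
	"fmt"

	"github.com/go-ldap/ldap"
)

func main() {
	// Ask for pages of 100 entries; in a real paged search the server's cookie
	// would be fed back with SetCookie before requesting the next page.
	paging := ldap.NewControlPaging(100)

	// Controls encode to and decode from BER packets without a live connection.
	decoded := ldap.DecodeControl(paging.Encode())
	fmt.Println(decoded.String())

	// FindControl picks the first control with a given OID out of a list.
	if c := ldap.FindControl([]ldap.Control{paging}, ldap.ControlTypePaging); c != nil {
		fmt.Println("found:", c.GetControlType())
	}
}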
+ +package ldap + +import ( + "fmt" + "strconv" + + "gopkg.in/asn1-ber.v1" +) + +const ( + // ControlTypePaging - https://www.ietf.org/rfc/rfc2696.txt + ControlTypePaging = "1.2.840.113556.1.4.319" + // ControlTypeBeheraPasswordPolicy - https://tools.ietf.org/html/draft-behera-ldap-password-policy-10 + ControlTypeBeheraPasswordPolicy = "1.3.6.1.4.1.42.2.27.8.5.1" + // ControlTypeVChuPasswordMustChange - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 + ControlTypeVChuPasswordMustChange = "2.16.840.1.113730.3.4.4" + // ControlTypeVChuPasswordWarning - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 + ControlTypeVChuPasswordWarning = "2.16.840.1.113730.3.4.5" + // ControlTypeManageDsaIT - https://tools.ietf.org/html/rfc3296 + ControlTypeManageDsaIT = "2.16.840.1.113730.3.4.2" +) + +// ControlTypeMap maps controls to text descriptions +var ControlTypeMap = map[string]string{ + ControlTypePaging: "Paging", + ControlTypeBeheraPasswordPolicy: "Password Policy - Behera Draft", + ControlTypeManageDsaIT: "Manage DSA IT", +} + +// Control defines an interface controls provide to encode and describe themselves +type Control interface { + // GetControlType returns the OID + GetControlType() string + // Encode returns the ber packet representation + Encode() *ber.Packet + // String returns a human-readable description + String() string +} + +// ControlString implements the Control interface for simple controls +type ControlString struct { + ControlType string + Criticality bool + ControlValue string +} + +// GetControlType returns the OID +func (c *ControlString) GetControlType() string { + return c.ControlType +} + +// Encode returns the ber packet representation +func (c *ControlString) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, c.ControlType, "Control Type ("+ControlTypeMap[c.ControlType]+")")) + if c.Criticality { + packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality")) + } + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, string(c.ControlValue), "Control Value")) + return packet +} + +// String returns a human-readable description +func (c *ControlString) String() string { + return fmt.Sprintf("Control Type: %s (%q) Criticality: %t Control Value: %s", ControlTypeMap[c.ControlType], c.ControlType, c.Criticality, c.ControlValue) +} + +// ControlPaging implements the paging control described in https://www.ietf.org/rfc/rfc2696.txt +type ControlPaging struct { + // PagingSize indicates the page size + PagingSize uint32 + // Cookie is an opaque value returned by the server to track a paging cursor + Cookie []byte +} + +// GetControlType returns the OID +func (c *ControlPaging) GetControlType() string { + return ControlTypePaging +} + +// Encode returns the ber packet representation +func (c *ControlPaging) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypePaging, "Control Type ("+ControlTypeMap[ControlTypePaging]+")")) + + p2 := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Control Value (Paging)") + seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Search Control Value") + 
seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(c.PagingSize), "Paging Size")) + cookie := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Cookie") + cookie.Value = c.Cookie + cookie.Data.Write(c.Cookie) + seq.AppendChild(cookie) + p2.AppendChild(seq) + + packet.AppendChild(p2) + return packet +} + +// String returns a human-readable description +func (c *ControlPaging) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t PagingSize: %d Cookie: %q", + ControlTypeMap[ControlTypePaging], + ControlTypePaging, + false, + c.PagingSize, + c.Cookie) +} + +// SetCookie stores the given cookie in the paging control +func (c *ControlPaging) SetCookie(cookie []byte) { + c.Cookie = cookie +} + +// ControlBeheraPasswordPolicy implements the control described in https://tools.ietf.org/html/draft-behera-ldap-password-policy-10 +type ControlBeheraPasswordPolicy struct { + // Expire contains the number of seconds before a password will expire + Expire int64 + // Grace indicates the remaining number of times a user will be allowed to authenticate with an expired password + Grace int64 + // Error indicates the error code + Error int8 + // ErrorString is a human readable error + ErrorString string +} + +// GetControlType returns the OID +func (c *ControlBeheraPasswordPolicy) GetControlType() string { + return ControlTypeBeheraPasswordPolicy +} + +// Encode returns the ber packet representation +func (c *ControlBeheraPasswordPolicy) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeBeheraPasswordPolicy, "Control Type ("+ControlTypeMap[ControlTypeBeheraPasswordPolicy]+")")) + + return packet +} + +// String returns a human-readable description +func (c *ControlBeheraPasswordPolicy) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t Expire: %d Grace: %d Error: %d, ErrorString: %s", + ControlTypeMap[ControlTypeBeheraPasswordPolicy], + ControlTypeBeheraPasswordPolicy, + false, + c.Expire, + c.Grace, + c.Error, + c.ErrorString) +} + +// ControlVChuPasswordMustChange implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 +type ControlVChuPasswordMustChange struct { + // MustChange indicates if the password is required to be changed + MustChange bool +} + +// GetControlType returns the OID +func (c *ControlVChuPasswordMustChange) GetControlType() string { + return ControlTypeVChuPasswordMustChange +} + +// Encode returns the ber packet representation +func (c *ControlVChuPasswordMustChange) Encode() *ber.Packet { + return nil +} + +// String returns a human-readable description +func (c *ControlVChuPasswordMustChange) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t MustChange: %v", + ControlTypeMap[ControlTypeVChuPasswordMustChange], + ControlTypeVChuPasswordMustChange, + false, + c.MustChange) +} + +// ControlVChuPasswordWarning implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 +type ControlVChuPasswordWarning struct { + // Expire indicates the time in seconds until the password expires + Expire int64 +} + +// GetControlType returns the OID +func (c *ControlVChuPasswordWarning) GetControlType() string { + return ControlTypeVChuPasswordWarning +} + +// Encode returns the ber packet representation +func 
(c *ControlVChuPasswordWarning) Encode() *ber.Packet { + return nil +} + +// String returns a human-readable description +func (c *ControlVChuPasswordWarning) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t Expire: %b", + ControlTypeMap[ControlTypeVChuPasswordWarning], + ControlTypeVChuPasswordWarning, + false, + c.Expire) +} + +// ControlManageDsaIT implements the control described in https://tools.ietf.org/html/rfc3296 +type ControlManageDsaIT struct { + // Criticality indicates if this control is required + Criticality bool +} + +// GetControlType returns the OID +func (c *ControlManageDsaIT) GetControlType() string { + return ControlTypeManageDsaIT +} + +// Encode returns the ber packet representation +func (c *ControlManageDsaIT) Encode() *ber.Packet { + //FIXME + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeManageDsaIT, "Control Type ("+ControlTypeMap[ControlTypeManageDsaIT]+")")) + if c.Criticality { + packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality")) + } + return packet +} + +// String returns a human-readable description +func (c *ControlManageDsaIT) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t", + ControlTypeMap[ControlTypeManageDsaIT], + ControlTypeManageDsaIT, + c.Criticality) +} + +// NewControlManageDsaIT returns a ControlManageDsaIT control +func NewControlManageDsaIT(Criticality bool) *ControlManageDsaIT { + return &ControlManageDsaIT{Criticality: Criticality} +} + +// FindControl returns the first control of the given type in the list, or nil +func FindControl(controls []Control, controlType string) Control { + for _, c := range controls { + if c.GetControlType() == controlType { + return c + } + } + return nil +} + +// DecodeControl returns a control read from the given packet, or nil if no recognized control can be made +func DecodeControl(packet *ber.Packet) Control { + var ( + ControlType = "" + Criticality = false + value *ber.Packet + ) + + switch len(packet.Children) { + case 0: + // at least one child is required for control type + return nil + + case 1: + // just type, no criticality or value + packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")" + ControlType = packet.Children[0].Value.(string) + + case 2: + packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")" + ControlType = packet.Children[0].Value.(string) + + // Children[1] could be criticality or value (both are optional) + // duck-type on whether this is a boolean + if _, ok := packet.Children[1].Value.(bool); ok { + packet.Children[1].Description = "Criticality" + Criticality = packet.Children[1].Value.(bool) + } else { + packet.Children[1].Description = "Control Value" + value = packet.Children[1] + } + + case 3: + packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")" + ControlType = packet.Children[0].Value.(string) + + packet.Children[1].Description = "Criticality" + Criticality = packet.Children[1].Value.(bool) + + packet.Children[2].Description = "Control Value" + value = packet.Children[2] + + default: + // more than 3 children is invalid + return nil + } + + switch ControlType { + case ControlTypeManageDsaIT: + return NewControlManageDsaIT(Criticality) + case ControlTypePaging: + value.Description += " (Paging)" + 
c := new(ControlPaging) + if value.Value != nil { + valueChildren := ber.DecodePacket(value.Data.Bytes()) + value.Data.Truncate(0) + value.Value = nil + value.AppendChild(valueChildren) + } + value = value.Children[0] + value.Description = "Search Control Value" + value.Children[0].Description = "Paging Size" + value.Children[1].Description = "Cookie" + c.PagingSize = uint32(value.Children[0].Value.(int64)) + c.Cookie = value.Children[1].Data.Bytes() + value.Children[1].Value = c.Cookie + return c + case ControlTypeBeheraPasswordPolicy: + value.Description += " (Password Policy - Behera)" + c := NewControlBeheraPasswordPolicy() + if value.Value != nil { + valueChildren := ber.DecodePacket(value.Data.Bytes()) + value.Data.Truncate(0) + value.Value = nil + value.AppendChild(valueChildren) + } + + sequence := value.Children[0] + + for _, child := range sequence.Children { + if child.Tag == 0 { + //Warning + warningPacket := child.Children[0] + packet := ber.DecodePacket(warningPacket.Data.Bytes()) + val, ok := packet.Value.(int64) + if ok { + if warningPacket.Tag == 0 { + //timeBeforeExpiration + c.Expire = val + warningPacket.Value = c.Expire + } else if warningPacket.Tag == 1 { + //graceAuthNsRemaining + c.Grace = val + warningPacket.Value = c.Grace + } + } + } else if child.Tag == 1 { + // Error + packet := ber.DecodePacket(child.Data.Bytes()) + val, ok := packet.Value.(int8) + if !ok { + // what to do? + val = -1 + } + c.Error = val + child.Value = c.Error + c.ErrorString = BeheraPasswordPolicyErrorMap[c.Error] + } + } + return c + case ControlTypeVChuPasswordMustChange: + c := &ControlVChuPasswordMustChange{MustChange: true} + return c + case ControlTypeVChuPasswordWarning: + c := &ControlVChuPasswordWarning{Expire: -1} + expireStr := ber.DecodeString(value.Data.Bytes()) + + expire, err := strconv.ParseInt(expireStr, 10, 64) + if err != nil { + return nil + } + c.Expire = expire + value.Value = c.Expire + + return c + default: + c := new(ControlString) + c.ControlType = ControlType + c.Criticality = Criticality + if value != nil { + c.ControlValue = value.Value.(string) + } + return c + } +} + +// NewControlString returns a generic control +func NewControlString(controlType string, criticality bool, controlValue string) *ControlString { + return &ControlString{ + ControlType: controlType, + Criticality: criticality, + ControlValue: controlValue, + } +} + +// NewControlPaging returns a paging control +func NewControlPaging(pagingSize uint32) *ControlPaging { + return &ControlPaging{PagingSize: pagingSize} +} + +// NewControlBeheraPasswordPolicy returns a ControlBeheraPasswordPolicy +func NewControlBeheraPasswordPolicy() *ControlBeheraPasswordPolicy { + return &ControlBeheraPasswordPolicy{ + Expire: -1, + Grace: -1, + Error: -1, + } +} + +func encodeControls(controls []Control) *ber.Packet { + packet := ber.Encode(ber.ClassContext, ber.TypeConstructed, 0, nil, "Controls") + for _, control := range controls { + packet.AppendChild(control.Encode()) + } + return packet +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/debug.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/debug.go new file mode 100644 index 00000000..7279fc25 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/debug.go @@ -0,0 +1,24 @@ +package ldap + +import ( + "log" + + "gopkg.in/asn1-ber.v1" +) + +// debugging type +// - has a Printf method to write the debug output +type debugging bool + +// write 
debug output +func (debug debugging) Printf(format string, args ...interface{}) { + if debug { + log.Printf(format, args...) + } +} + +func (debug debugging) PrintPacket(packet *ber.Packet) { + if debug { + ber.PrintPacket(packet) + } +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/del.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/del.go new file mode 100644 index 00000000..4fd63dc3 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/del.go @@ -0,0 +1,84 @@ +// +// https://tools.ietf.org/html/rfc4511 +// +// DelRequest ::= [APPLICATION 10] LDAPDN + +package ldap + +import ( + "errors" + "log" + + "gopkg.in/asn1-ber.v1" +) + +// DelRequest implements an LDAP deletion request +type DelRequest struct { + // DN is the name of the directory entry to delete + DN string + // Controls hold optional controls to send with the request + Controls []Control +} + +func (d DelRequest) encode() *ber.Packet { + request := ber.Encode(ber.ClassApplication, ber.TypePrimitive, ApplicationDelRequest, d.DN, "Del Request") + request.Data.Write([]byte(d.DN)) + return request +} + +// NewDelRequest creates a delete request for the given DN and controls +func NewDelRequest(DN string, + Controls []Control) *DelRequest { + return &DelRequest{ + DN: DN, + Controls: Controls, + } +} + +// Del executes the given delete request +func (l *Conn) Del(delRequest *DelRequest) error { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + packet.AppendChild(delRequest.encode()) + if delRequest.Controls != nil { + packet.AppendChild(encodeControls(delRequest.Controls)) + } + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return err + } + ber.PrintPacket(packet) + } + + if packet.Children[1].Tag == ApplicationDelResponse { + resultCode, resultDescription := getLDAPResultCode(packet) + if resultCode != 0 { + return NewError(resultCode, errors.New(resultDescription)) + } + } else { + log.Printf("Unexpected Response: %d", packet.Children[1].Tag) + } + + l.Debug.Printf("%d: returning", msgCtx.id) + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/dn.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/dn.go new file mode 100644 index 00000000..34e9023a --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/dn.go @@ -0,0 +1,247 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
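A minimal usage sketch for the Del method in del.go above: the DN, the helper name deleteEntry, and the assumption of an already-bound *ldap.Conn (with the package imported as ldap from github.com/go-ldap/ldap) are illustrative and not part of this change.

// deleteEntry is a hypothetical helper; l is assumed to be an already-bound connection.
func deleteEntry(l *ldap.Conn) error {
	// nil means no request controls are attached to the delete.
	delReq := ldap.NewDelRequest("cn=example,ou=people,dc=example,dc=com", nil)
	if err := l.Del(delReq); err != nil {
		// IsErrorWithCode (defined in error.go further down in this diff) allows
		// branching on specific LDAP result codes instead of matching error strings.
		if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) {
			return nil // treat "entry already gone" as success in this sketch
		}
		return err
	}
	return nil
}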
+// +// File contains DN parsing functionality +// +// https://tools.ietf.org/html/rfc4514 +// +// distinguishedName = [ relativeDistinguishedName +// *( COMMA relativeDistinguishedName ) ] +// relativeDistinguishedName = attributeTypeAndValue +// *( PLUS attributeTypeAndValue ) +// attributeTypeAndValue = attributeType EQUALS attributeValue +// attributeType = descr / numericoid +// attributeValue = string / hexstring +// +// ; The following characters are to be escaped when they appear +// ; in the value to be encoded: ESC, one of <escaped>, leading +// ; SHARP or SPACE, trailing SPACE, and NULL. +// string = [ ( leadchar / pair ) [ *( stringchar / pair ) +// ( trailchar / pair ) ] ] +// +// leadchar = LUTF1 / UTFMB +// LUTF1 = %x01-1F / %x21 / %x24-2A / %x2D-3A / +// %x3D / %x3F-5B / %x5D-7F +// +// trailchar = TUTF1 / UTFMB +// TUTF1 = %x01-1F / %x21 / %x23-2A / %x2D-3A / +// %x3D / %x3F-5B / %x5D-7F +// +// stringchar = SUTF1 / UTFMB +// SUTF1 = %x01-21 / %x23-2A / %x2D-3A / +// %x3D / %x3F-5B / %x5D-7F +// +// pair = ESC ( ESC / special / hexpair ) +// special = escaped / SPACE / SHARP / EQUALS +// escaped = DQUOTE / PLUS / COMMA / SEMI / LANGLE / RANGLE +// hexstring = SHARP 1*hexpair +// hexpair = HEX HEX +// +// where the productions <descr>, <numericoid>, <COMMA>, <DQUOTE>, +// <EQUALS>, <ESC>, <HEX>, <LANGLE>, <NULL>, <PLUS>, <RANGLE>, <SEMI>, +// <SPACE>, <SHARP>, and <UTFMB> are defined in [RFC4512]. +// + +package ldap + +import ( + "bytes" + enchex "encoding/hex" + "errors" + "fmt" + "strings" + + "gopkg.in/asn1-ber.v1" +) + +// AttributeTypeAndValue represents an attributeTypeAndValue from https://tools.ietf.org/html/rfc4514 +type AttributeTypeAndValue struct { + // Type is the attribute type + Type string + // Value is the attribute value + Value string +} + +// RelativeDN represents a relativeDistinguishedName from https://tools.ietf.org/html/rfc4514 +type RelativeDN struct { + Attributes []*AttributeTypeAndValue +} + +// DN represents a distinguishedName from https://tools.ietf.org/html/rfc4514 +type DN struct { + RDNs []*RelativeDN +} + +// ParseDN returns a distinguishedName or an error +func ParseDN(str string) (*DN, error) { + dn := new(DN) + dn.RDNs = make([]*RelativeDN, 0) + rdn := new(RelativeDN) + rdn.Attributes = make([]*AttributeTypeAndValue, 0) + buffer := bytes.Buffer{} + attribute := new(AttributeTypeAndValue) + escaping := false + + unescapedTrailingSpaces := 0 + stringFromBuffer := func() string { + s := buffer.String() + s = s[0 : len(s)-unescapedTrailingSpaces] + buffer.Reset() + unescapedTrailingSpaces = 0 + return s + } + + for i := 0; i < len(str); i++ { + char := str[i] + if escaping { + unescapedTrailingSpaces = 0 + escaping = false + switch char { + case ' ', '"', '#', '+', ',', ';', '<', '=', '>', '\\': + buffer.WriteByte(char) + continue + } + // Not a special character, assume hex encoded octet + if len(str) == i+1 { + return nil, errors.New("Got corrupted escaped character") + } + + dst := []byte{0} + n, err := enchex.Decode([]byte(dst), []byte(str[i:i+2])) + if err != nil { + return nil, fmt.Errorf("Failed to decode escaped character: %s", err) + } else if n != 1 { + return nil, fmt.Errorf("Expected 1 byte when un-escaping, got %d", n) + } + buffer.WriteByte(dst[0]) + i++ + } else if char == '\\' { + unescapedTrailingSpaces = 0 + escaping = true + } else if char == '=' { + attribute.Type = stringFromBuffer() + // Special case: If the first character in the value is # the + // following data is BER encoded so we can just fast forward + // and 
decode. + if len(str) > i+1 && str[i+1] == '#' { + i += 2 + index := strings.IndexAny(str[i:], ",+") + data := str + if index > 0 { + data = str[i : i+index] + } else { + data = str[i:] + } + rawBER, err := enchex.DecodeString(data) + if err != nil { + return nil, fmt.Errorf("Failed to decode BER encoding: %s", err) + } + packet := ber.DecodePacket(rawBER) + buffer.WriteString(packet.Data.String()) + i += len(data) - 1 + } + } else if char == ',' || char == '+' { + // We're done with this RDN or value, push it + if len(attribute.Type) == 0 { + return nil, errors.New("incomplete type, value pair") + } + attribute.Value = stringFromBuffer() + rdn.Attributes = append(rdn.Attributes, attribute) + attribute = new(AttributeTypeAndValue) + if char == ',' { + dn.RDNs = append(dn.RDNs, rdn) + rdn = new(RelativeDN) + rdn.Attributes = make([]*AttributeTypeAndValue, 0) + } + } else if char == ' ' && buffer.Len() == 0 { + // ignore unescaped leading spaces + continue + } else { + if char == ' ' { + // Track unescaped spaces in case they are trailing and we need to remove them + unescapedTrailingSpaces++ + } else { + // Reset if we see a non-space char + unescapedTrailingSpaces = 0 + } + buffer.WriteByte(char) + } + } + if buffer.Len() > 0 { + if len(attribute.Type) == 0 { + return nil, errors.New("DN ended with incomplete type, value pair") + } + attribute.Value = stringFromBuffer() + rdn.Attributes = append(rdn.Attributes, attribute) + dn.RDNs = append(dn.RDNs, rdn) + } + return dn, nil +} + +// Equal returns true if the DNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch). +// Returns true if they have the same number of relative distinguished names +// and corresponding relative distinguished names (by position) are the same. +func (d *DN) Equal(other *DN) bool { + if len(d.RDNs) != len(other.RDNs) { + return false + } + for i := range d.RDNs { + if !d.RDNs[i].Equal(other.RDNs[i]) { + return false + } + } + return true +} + +// AncestorOf returns true if the other DN consists of at least one RDN followed by all the RDNs of the current DN. +// "ou=widgets,o=acme.com" is an ancestor of "ou=sprockets,ou=widgets,o=acme.com" +// "ou=widgets,o=acme.com" is not an ancestor of "ou=sprockets,ou=widgets,o=foo.com" +// "ou=widgets,o=acme.com" is not an ancestor of "ou=widgets,o=acme.com" +func (d *DN) AncestorOf(other *DN) bool { + if len(d.RDNs) >= len(other.RDNs) { + return false + } + // Take the last `len(d.RDNs)` RDNs from the other DN to compare against + otherRDNs := other.RDNs[len(other.RDNs)-len(d.RDNs):] + for i := range d.RDNs { + if !d.RDNs[i].Equal(otherRDNs[i]) { + return false + } + } + return true +} + +// Equal returns true if the RelativeDNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch). +// Relative distinguished names are the same if and only if they have the same number of AttributeTypeAndValues +// and each attribute of the first RDN is the same as the attribute of the second RDN with the same attribute type. +// The order of attributes is not significant. +// Case of attribute types is not significant. 
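A small, self-contained sketch exercising ParseDN, DN.Equal, and DN.AncestorOf from dn.go above; the DNs are taken from the AncestorOf doc comment and the import path is the upstream github.com/go-ldap/ldap package.

package main

import (
	"fmt"
	"log"

	"github.com/go-ldap/ldap"
)

func main() {
	parent, err := ldap.ParseDN("ou=widgets,o=acme.com")
	if err != nil {
		log.Fatal(err)
	}
	child, err := ldap.ParseDN("ou=sprockets,ou=widgets,o=acme.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parent.Equal(child))      // false: the DNs have a different number of RDNs
	fmt.Println(parent.AncestorOf(child)) // true: child ends with all of parent's RDNs
}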
+func (r *RelativeDN) Equal(other *RelativeDN) bool { + if len(r.Attributes) != len(other.Attributes) { + return false + } + return r.hasAllAttributes(other.Attributes) && other.hasAllAttributes(r.Attributes) +} + +func (r *RelativeDN) hasAllAttributes(attrs []*AttributeTypeAndValue) bool { + for _, attr := range attrs { + found := false + for _, myattr := range r.Attributes { + if myattr.Equal(attr) { + found = true + break + } + } + if !found { + return false + } + } + return true +} + +// Equal returns true if the AttributeTypeAndValue is equivalent to the specified AttributeTypeAndValue +// Case of the attribute type is not significant +func (a *AttributeTypeAndValue) Equal(other *AttributeTypeAndValue) bool { + return strings.EqualFold(a.Type, other.Type) && a.Value == other.Value +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/doc.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/doc.go new file mode 100644 index 00000000..f20d39bc --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/doc.go @@ -0,0 +1,4 @@ +/* +Package ldap provides basic LDAP v3 functionality. +*/ +package ldap diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/error.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/error.go new file mode 100644 index 00000000..4cccb537 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/error.go @@ -0,0 +1,155 @@ +package ldap + +import ( + "fmt" + + "gopkg.in/asn1-ber.v1" +) + +// LDAP Result Codes +const ( + LDAPResultSuccess = 0 + LDAPResultOperationsError = 1 + LDAPResultProtocolError = 2 + LDAPResultTimeLimitExceeded = 3 + LDAPResultSizeLimitExceeded = 4 + LDAPResultCompareFalse = 5 + LDAPResultCompareTrue = 6 + LDAPResultAuthMethodNotSupported = 7 + LDAPResultStrongAuthRequired = 8 + LDAPResultReferral = 10 + LDAPResultAdminLimitExceeded = 11 + LDAPResultUnavailableCriticalExtension = 12 + LDAPResultConfidentialityRequired = 13 + LDAPResultSaslBindInProgress = 14 + LDAPResultNoSuchAttribute = 16 + LDAPResultUndefinedAttributeType = 17 + LDAPResultInappropriateMatching = 18 + LDAPResultConstraintViolation = 19 + LDAPResultAttributeOrValueExists = 20 + LDAPResultInvalidAttributeSyntax = 21 + LDAPResultNoSuchObject = 32 + LDAPResultAliasProblem = 33 + LDAPResultInvalidDNSyntax = 34 + LDAPResultAliasDereferencingProblem = 36 + LDAPResultInappropriateAuthentication = 48 + LDAPResultInvalidCredentials = 49 + LDAPResultInsufficientAccessRights = 50 + LDAPResultBusy = 51 + LDAPResultUnavailable = 52 + LDAPResultUnwillingToPerform = 53 + LDAPResultLoopDetect = 54 + LDAPResultNamingViolation = 64 + LDAPResultObjectClassViolation = 65 + LDAPResultNotAllowedOnNonLeaf = 66 + LDAPResultNotAllowedOnRDN = 67 + LDAPResultEntryAlreadyExists = 68 + LDAPResultObjectClassModsProhibited = 69 + LDAPResultAffectsMultipleDSAs = 71 + LDAPResultOther = 80 + + ErrorNetwork = 200 + ErrorFilterCompile = 201 + ErrorFilterDecompile = 202 + ErrorDebugging = 203 + ErrorUnexpectedMessage = 204 + ErrorUnexpectedResponse = 205 +) + +// LDAPResultCodeMap contains string descriptions for LDAP error codes +var LDAPResultCodeMap = map[uint8]string{ + LDAPResultSuccess: "Success", + LDAPResultOperationsError: "Operations Error", + LDAPResultProtocolError: "Protocol Error", + LDAPResultTimeLimitExceeded: "Time Limit Exceeded", + LDAPResultSizeLimitExceeded: "Size Limit Exceeded", 
+ LDAPResultCompareFalse: "Compare False", + LDAPResultCompareTrue: "Compare True", + LDAPResultAuthMethodNotSupported: "Auth Method Not Supported", + LDAPResultStrongAuthRequired: "Strong Auth Required", + LDAPResultReferral: "Referral", + LDAPResultAdminLimitExceeded: "Admin Limit Exceeded", + LDAPResultUnavailableCriticalExtension: "Unavailable Critical Extension", + LDAPResultConfidentialityRequired: "Confidentiality Required", + LDAPResultSaslBindInProgress: "Sasl Bind In Progress", + LDAPResultNoSuchAttribute: "No Such Attribute", + LDAPResultUndefinedAttributeType: "Undefined Attribute Type", + LDAPResultInappropriateMatching: "Inappropriate Matching", + LDAPResultConstraintViolation: "Constraint Violation", + LDAPResultAttributeOrValueExists: "Attribute Or Value Exists", + LDAPResultInvalidAttributeSyntax: "Invalid Attribute Syntax", + LDAPResultNoSuchObject: "No Such Object", + LDAPResultAliasProblem: "Alias Problem", + LDAPResultInvalidDNSyntax: "Invalid DN Syntax", + LDAPResultAliasDereferencingProblem: "Alias Dereferencing Problem", + LDAPResultInappropriateAuthentication: "Inappropriate Authentication", + LDAPResultInvalidCredentials: "Invalid Credentials", + LDAPResultInsufficientAccessRights: "Insufficient Access Rights", + LDAPResultBusy: "Busy", + LDAPResultUnavailable: "Unavailable", + LDAPResultUnwillingToPerform: "Unwilling To Perform", + LDAPResultLoopDetect: "Loop Detect", + LDAPResultNamingViolation: "Naming Violation", + LDAPResultObjectClassViolation: "Object Class Violation", + LDAPResultNotAllowedOnNonLeaf: "Not Allowed On Non Leaf", + LDAPResultNotAllowedOnRDN: "Not Allowed On RDN", + LDAPResultEntryAlreadyExists: "Entry Already Exists", + LDAPResultObjectClassModsProhibited: "Object Class Mods Prohibited", + LDAPResultAffectsMultipleDSAs: "Affects Multiple DSAs", + LDAPResultOther: "Other", + + ErrorNetwork: "Network Error", + ErrorFilterCompile: "Filter Compile Error", + ErrorFilterDecompile: "Filter Decompile Error", + ErrorDebugging: "Debugging Error", + ErrorUnexpectedMessage: "Unexpected Message", + ErrorUnexpectedResponse: "Unexpected Response", +} + +func getLDAPResultCode(packet *ber.Packet) (code uint8, description string) { + if packet == nil { + return ErrorUnexpectedResponse, "Empty packet" + } else if len(packet.Children) >= 2 { + response := packet.Children[1] + if response == nil { + return ErrorUnexpectedResponse, "Empty response in packet" + } + if response.ClassType == ber.ClassApplication && response.TagType == ber.TypeConstructed && len(response.Children) >= 3 { + // Children[1].Children[2] is the diagnosticMessage which is guaranteed to exist as seen here: https://tools.ietf.org/html/rfc4511#section-4.1.9 + return uint8(response.Children[0].Value.(int64)), response.Children[2].Value.(string) + } + } + + return ErrorNetwork, "Invalid packet format" +} + +// Error holds LDAP error information +type Error struct { + // Err is the underlying error + Err error + // ResultCode is the LDAP error code + ResultCode uint8 +} + +func (e *Error) Error() string { + return fmt.Sprintf("LDAP Result Code %d %q: %s", e.ResultCode, LDAPResultCodeMap[e.ResultCode], e.Err.Error()) +} + +// NewError creates an LDAP error with the given code and underlying error +func NewError(resultCode uint8, err error) error { + return &Error{ResultCode: resultCode, Err: err} +} + +// IsErrorWithCode returns true if the given error is an LDAP error with the given result code +func IsErrorWithCode(err error, desiredResultCode uint8) bool { + if err == nil { + return false + } 
+ + serverError, ok := err.(*Error) + if !ok { + return false + } + + return serverError.ResultCode == desiredResultCode +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/filter.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/filter.go new file mode 100644 index 00000000..3858a286 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/filter.go @@ -0,0 +1,469 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ldap + +import ( + "bytes" + hexpac "encoding/hex" + "errors" + "fmt" + "strings" + "unicode/utf8" + + "gopkg.in/asn1-ber.v1" +) + +// Filter choices +const ( + FilterAnd = 0 + FilterOr = 1 + FilterNot = 2 + FilterEqualityMatch = 3 + FilterSubstrings = 4 + FilterGreaterOrEqual = 5 + FilterLessOrEqual = 6 + FilterPresent = 7 + FilterApproxMatch = 8 + FilterExtensibleMatch = 9 +) + +// FilterMap contains human readable descriptions of Filter choices +var FilterMap = map[uint64]string{ + FilterAnd: "And", + FilterOr: "Or", + FilterNot: "Not", + FilterEqualityMatch: "Equality Match", + FilterSubstrings: "Substrings", + FilterGreaterOrEqual: "Greater Or Equal", + FilterLessOrEqual: "Less Or Equal", + FilterPresent: "Present", + FilterApproxMatch: "Approx Match", + FilterExtensibleMatch: "Extensible Match", +} + +// SubstringFilter options +const ( + FilterSubstringsInitial = 0 + FilterSubstringsAny = 1 + FilterSubstringsFinal = 2 +) + +// FilterSubstringsMap contains human readable descriptions of SubstringFilter choices +var FilterSubstringsMap = map[uint64]string{ + FilterSubstringsInitial: "Substrings Initial", + FilterSubstringsAny: "Substrings Any", + FilterSubstringsFinal: "Substrings Final", +} + +// MatchingRuleAssertion choices +const ( + MatchingRuleAssertionMatchingRule = 1 + MatchingRuleAssertionType = 2 + MatchingRuleAssertionMatchValue = 3 + MatchingRuleAssertionDNAttributes = 4 +) + +// MatchingRuleAssertionMap contains human readable descriptions of MatchingRuleAssertion choices +var MatchingRuleAssertionMap = map[uint64]string{ + MatchingRuleAssertionMatchingRule: "Matching Rule Assertion Matching Rule", + MatchingRuleAssertionType: "Matching Rule Assertion Type", + MatchingRuleAssertionMatchValue: "Matching Rule Assertion Match Value", + MatchingRuleAssertionDNAttributes: "Matching Rule Assertion DN Attributes", +} + +// CompileFilter converts a string representation of a filter into a BER-encoded packet +func CompileFilter(filter string) (*ber.Packet, error) { + if len(filter) == 0 || filter[0] != '(' { + return nil, NewError(ErrorFilterCompile, errors.New("ldap: filter does not start with an '('")) + } + packet, pos, err := compileFilter(filter, 1) + if err != nil { + return nil, err + } + switch { + case pos > len(filter): + return nil, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter")) + case pos < len(filter): + return nil, NewError(ErrorFilterCompile, errors.New("ldap: finished compiling filter with extra at end: "+fmt.Sprint(filter[pos:]))) + } + return packet, nil +} + +// DecompileFilter converts a packet representation of a filter into a string representation +func DecompileFilter(packet *ber.Packet) (ret string, err error) { + defer func() { + if r := recover(); r != nil { + err = NewError(ErrorFilterDecompile, errors.New("ldap: error decompiling filter")) + } + }() + ret = "(" + err = nil + 
childStr := "" + + switch packet.Tag { + case FilterAnd: + ret += "&" + for _, child := range packet.Children { + childStr, err = DecompileFilter(child) + if err != nil { + return + } + ret += childStr + } + case FilterOr: + ret += "|" + for _, child := range packet.Children { + childStr, err = DecompileFilter(child) + if err != nil { + return + } + ret += childStr + } + case FilterNot: + ret += "!" + childStr, err = DecompileFilter(packet.Children[0]) + if err != nil { + return + } + ret += childStr + + case FilterSubstrings: + ret += ber.DecodeString(packet.Children[0].Data.Bytes()) + ret += "=" + for i, child := range packet.Children[1].Children { + if i == 0 && child.Tag != FilterSubstringsInitial { + ret += "*" + } + ret += EscapeFilter(ber.DecodeString(child.Data.Bytes())) + if child.Tag != FilterSubstringsFinal { + ret += "*" + } + } + case FilterEqualityMatch: + ret += ber.DecodeString(packet.Children[0].Data.Bytes()) + ret += "=" + ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())) + case FilterGreaterOrEqual: + ret += ber.DecodeString(packet.Children[0].Data.Bytes()) + ret += ">=" + ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())) + case FilterLessOrEqual: + ret += ber.DecodeString(packet.Children[0].Data.Bytes()) + ret += "<=" + ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())) + case FilterPresent: + ret += ber.DecodeString(packet.Data.Bytes()) + ret += "=*" + case FilterApproxMatch: + ret += ber.DecodeString(packet.Children[0].Data.Bytes()) + ret += "~=" + ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())) + case FilterExtensibleMatch: + attr := "" + dnAttributes := false + matchingRule := "" + value := "" + + for _, child := range packet.Children { + switch child.Tag { + case MatchingRuleAssertionMatchingRule: + matchingRule = ber.DecodeString(child.Data.Bytes()) + case MatchingRuleAssertionType: + attr = ber.DecodeString(child.Data.Bytes()) + case MatchingRuleAssertionMatchValue: + value = ber.DecodeString(child.Data.Bytes()) + case MatchingRuleAssertionDNAttributes: + dnAttributes = child.Value.(bool) + } + } + + if len(attr) > 0 { + ret += attr + } + if dnAttributes { + ret += ":dn" + } + if len(matchingRule) > 0 { + ret += ":" + ret += matchingRule + } + ret += ":=" + ret += EscapeFilter(value) + } + + ret += ")" + return +} + +func compileFilterSet(filter string, pos int, parent *ber.Packet) (int, error) { + for pos < len(filter) && filter[pos] == '(' { + child, newPos, err := compileFilter(filter, pos+1) + if err != nil { + return pos, err + } + pos = newPos + parent.AppendChild(child) + } + if pos == len(filter) { + return pos, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter")) + } + + return pos + 1, nil +} + +func compileFilter(filter string, pos int) (*ber.Packet, int, error) { + var ( + packet *ber.Packet + err error + ) + + defer func() { + if r := recover(); r != nil { + err = NewError(ErrorFilterCompile, errors.New("ldap: error compiling filter")) + } + }() + newPos := pos + + currentRune, currentWidth := utf8.DecodeRuneInString(filter[newPos:]) + + switch currentRune { + case utf8.RuneError: + return nil, 0, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos)) + case '(': + packet, newPos, err = compileFilter(filter, pos+currentWidth) + newPos++ + return packet, newPos, err + case '&': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterAnd, nil, FilterMap[FilterAnd]) + newPos, err = 
compileFilterSet(filter, pos+currentWidth, packet) + return packet, newPos, err + case '|': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterOr, nil, FilterMap[FilterOr]) + newPos, err = compileFilterSet(filter, pos+currentWidth, packet) + return packet, newPos, err + case '!': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterNot, nil, FilterMap[FilterNot]) + var child *ber.Packet + child, newPos, err = compileFilter(filter, pos+currentWidth) + packet.AppendChild(child) + return packet, newPos, err + default: + const ( + stateReadingAttr = 0 + stateReadingExtensibleMatchingRule = 1 + stateReadingCondition = 2 + ) + + state := stateReadingAttr + + attribute := "" + extensibleDNAttributes := false + extensibleMatchingRule := "" + condition := "" + + for newPos < len(filter) { + remainingFilter := filter[newPos:] + currentRune, currentWidth = utf8.DecodeRuneInString(remainingFilter) + if currentRune == ')' { + break + } + if currentRune == utf8.RuneError { + return packet, newPos, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos)) + } + + switch state { + case stateReadingAttr: + switch { + // Extensible rule, with only DN-matching + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + extensibleDNAttributes = true + state = stateReadingCondition + newPos += 5 + + // Extensible rule, with DN-matching and a matching OID + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:"): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + extensibleDNAttributes = true + state = stateReadingExtensibleMatchingRule + newPos += 4 + + // Extensible rule, with attr only + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + state = stateReadingCondition + newPos += 2 + + // Extensible rule, with no DN attribute matching + case currentRune == ':': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + state = stateReadingExtensibleMatchingRule + newPos++ + + // Equality condition + case currentRune == '=': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterEqualityMatch, nil, FilterMap[FilterEqualityMatch]) + state = stateReadingCondition + newPos++ + + // Greater-than or equal + case currentRune == '>' && strings.HasPrefix(remainingFilter, ">="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterGreaterOrEqual, nil, FilterMap[FilterGreaterOrEqual]) + state = stateReadingCondition + newPos += 2 + + // Less-than or equal + case currentRune == '<' && strings.HasPrefix(remainingFilter, "<="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterLessOrEqual, nil, FilterMap[FilterLessOrEqual]) + state = stateReadingCondition + newPos += 2 + + // Approx + case currentRune == '~' && strings.HasPrefix(remainingFilter, "~="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterApproxMatch, nil, FilterMap[FilterApproxMatch]) + state = stateReadingCondition + newPos += 2 + + // Still reading the attribute name + default: + attribute += fmt.Sprintf("%c", currentRune) + newPos += currentWidth + } + + case stateReadingExtensibleMatchingRule: + switch { + 
+ // Matching rule OID is done + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="): + state = stateReadingCondition + newPos += 2 + + // Still reading the matching rule oid + default: + extensibleMatchingRule += fmt.Sprintf("%c", currentRune) + newPos += currentWidth + } + + case stateReadingCondition: + // append to the condition + condition += fmt.Sprintf("%c", currentRune) + newPos += currentWidth + } + } + + if newPos == len(filter) { + err = NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter")) + return packet, newPos, err + } + if packet == nil { + err = NewError(ErrorFilterCompile, errors.New("ldap: error parsing filter")) + return packet, newPos, err + } + + switch { + case packet.Tag == FilterExtensibleMatch: + // MatchingRuleAssertion ::= SEQUENCE { + // matchingRule [1] MatchingRuleID OPTIONAL, + // type [2] AttributeDescription OPTIONAL, + // matchValue [3] AssertionValue, + // dnAttributes [4] BOOLEAN DEFAULT FALSE + // } + + // Include the matching rule oid, if specified + if len(extensibleMatchingRule) > 0 { + packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchingRule, extensibleMatchingRule, MatchingRuleAssertionMap[MatchingRuleAssertionMatchingRule])) + } + + // Include the attribute, if specified + if len(attribute) > 0 { + packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionType, attribute, MatchingRuleAssertionMap[MatchingRuleAssertionType])) + } + + // Add the value (only required child) + encodedString, encodeErr := escapedStringToEncodedBytes(condition) + if encodeErr != nil { + return packet, newPos, encodeErr + } + packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchValue, encodedString, MatchingRuleAssertionMap[MatchingRuleAssertionMatchValue])) + + // Defaults to false, so only include in the sequence if true + if extensibleDNAttributes { + packet.AppendChild(ber.NewBoolean(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionDNAttributes, extensibleDNAttributes, MatchingRuleAssertionMap[MatchingRuleAssertionDNAttributes])) + } + + case packet.Tag == FilterEqualityMatch && condition == "*": + packet = ber.NewString(ber.ClassContext, ber.TypePrimitive, FilterPresent, attribute, FilterMap[FilterPresent]) + case packet.Tag == FilterEqualityMatch && strings.Contains(condition, "*"): + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute")) + packet.Tag = FilterSubstrings + packet.Description = FilterMap[uint64(packet.Tag)] + seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Substrings") + parts := strings.Split(condition, "*") + for i, part := range parts { + if part == "" { + continue + } + var tag ber.Tag + switch i { + case 0: + tag = FilterSubstringsInitial + case len(parts) - 1: + tag = FilterSubstringsFinal + default: + tag = FilterSubstringsAny + } + encodedString, encodeErr := escapedStringToEncodedBytes(part) + if encodeErr != nil { + return packet, newPos, encodeErr + } + seq.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, tag, encodedString, FilterSubstringsMap[uint64(tag)])) + } + packet.AppendChild(seq) + default: + encodedString, encodeErr := escapedStringToEncodedBytes(condition) + if encodeErr != nil { + return packet, newPos, encodeErr + } + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute")) + 
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, encodedString, "Condition")) + } + + newPos += currentWidth + return packet, newPos, err + } +} + +// Convert from "ABC\xx\xx\xx" form to literal bytes for transport +func escapedStringToEncodedBytes(escapedString string) (string, error) { + var buffer bytes.Buffer + i := 0 + for i < len(escapedString) { + currentRune, currentWidth := utf8.DecodeRuneInString(escapedString[i:]) + if currentRune == utf8.RuneError { + return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", i)) + } + + // Check for escaped hex characters and convert them to their literal value for transport. + if currentRune == '\\' { + // http://tools.ietf.org/search/rfc4515 + // \ (%x5C) is not a valid character unless it is followed by two HEX characters due to not + // being a member of UTF1SUBSET. + if i+2 > len(escapedString) { + return "", NewError(ErrorFilterCompile, errors.New("ldap: missing characters for escape in filter")) + } + escByte, decodeErr := hexpac.DecodeString(escapedString[i+1 : i+3]) + if decodeErr != nil { + return "", NewError(ErrorFilterCompile, errors.New("ldap: invalid characters for escape in filter")) + } + buffer.WriteByte(escByte[0]) + i += 2 // +1 from end of loop, so 3 total for \xx. + } else { + buffer.WriteRune(currentRune) + } + + i += currentWidth + } + return buffer.String(), nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/ldap.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/ldap.go new file mode 100644 index 00000000..49692475 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/ldap.go @@ -0,0 +1,320 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
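A round-trip sketch for the filter helpers in filter.go above; the filter string is illustrative, and the package is assumed to be imported as ldap from github.com/go-ldap/ldap.

package main

import (
	"fmt"
	"log"

	"github.com/go-ldap/ldap"
)

func main() {
	// Compile a textual filter into its BER packet form, then decompile it back.
	packet, err := ldap.CompileFilter("(&(objectClass=person)(cn=ali*))")
	if err != nil {
		log.Fatal(err)
	}
	text, err := ldap.DecompileFilter(packet)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(text) // expected to print the original filter string
}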
+ +package ldap + +import ( + "errors" + "io/ioutil" + "os" + + "gopkg.in/asn1-ber.v1" +) + +// LDAP Application Codes +const ( + ApplicationBindRequest = 0 + ApplicationBindResponse = 1 + ApplicationUnbindRequest = 2 + ApplicationSearchRequest = 3 + ApplicationSearchResultEntry = 4 + ApplicationSearchResultDone = 5 + ApplicationModifyRequest = 6 + ApplicationModifyResponse = 7 + ApplicationAddRequest = 8 + ApplicationAddResponse = 9 + ApplicationDelRequest = 10 + ApplicationDelResponse = 11 + ApplicationModifyDNRequest = 12 + ApplicationModifyDNResponse = 13 + ApplicationCompareRequest = 14 + ApplicationCompareResponse = 15 + ApplicationAbandonRequest = 16 + ApplicationSearchResultReference = 19 + ApplicationExtendedRequest = 23 + ApplicationExtendedResponse = 24 +) + +// ApplicationMap contains human readable descriptions of LDAP Application Codes +var ApplicationMap = map[uint8]string{ + ApplicationBindRequest: "Bind Request", + ApplicationBindResponse: "Bind Response", + ApplicationUnbindRequest: "Unbind Request", + ApplicationSearchRequest: "Search Request", + ApplicationSearchResultEntry: "Search Result Entry", + ApplicationSearchResultDone: "Search Result Done", + ApplicationModifyRequest: "Modify Request", + ApplicationModifyResponse: "Modify Response", + ApplicationAddRequest: "Add Request", + ApplicationAddResponse: "Add Response", + ApplicationDelRequest: "Del Request", + ApplicationDelResponse: "Del Response", + ApplicationModifyDNRequest: "Modify DN Request", + ApplicationModifyDNResponse: "Modify DN Response", + ApplicationCompareRequest: "Compare Request", + ApplicationCompareResponse: "Compare Response", + ApplicationAbandonRequest: "Abandon Request", + ApplicationSearchResultReference: "Search Result Reference", + ApplicationExtendedRequest: "Extended Request", + ApplicationExtendedResponse: "Extended Response", +} + +// Ldap Behera Password Policy Draft 10 (https://tools.ietf.org/html/draft-behera-ldap-password-policy-10) +const ( + BeheraPasswordExpired = 0 + BeheraAccountLocked = 1 + BeheraChangeAfterReset = 2 + BeheraPasswordModNotAllowed = 3 + BeheraMustSupplyOldPassword = 4 + BeheraInsufficientPasswordQuality = 5 + BeheraPasswordTooShort = 6 + BeheraPasswordTooYoung = 7 + BeheraPasswordInHistory = 8 +) + +// BeheraPasswordPolicyErrorMap contains human readable descriptions of Behera Password Policy error codes +var BeheraPasswordPolicyErrorMap = map[int8]string{ + BeheraPasswordExpired: "Password expired", + BeheraAccountLocked: "Account locked", + BeheraChangeAfterReset: "Password must be changed", + BeheraPasswordModNotAllowed: "Policy prevents password modification", + BeheraMustSupplyOldPassword: "Policy requires old password in order to change password", + BeheraInsufficientPasswordQuality: "Password fails quality checks", + BeheraPasswordTooShort: "Password is too short for policy", + BeheraPasswordTooYoung: "Password has been changed too recently", + BeheraPasswordInHistory: "New password is in list of old passwords", +} + +// Adds descriptions to an LDAP Response packet for debugging +func addLDAPDescriptions(packet *ber.Packet) (err error) { + defer func() { + if r := recover(); r != nil { + err = NewError(ErrorDebugging, errors.New("ldap: cannot process packet to add descriptions")) + } + }() + packet.Description = "LDAP Response" + packet.Children[0].Description = "Message ID" + + application := uint8(packet.Children[1].Tag) + packet.Children[1].Description = ApplicationMap[application] + + switch application { + case ApplicationBindRequest: + 
addRequestDescriptions(packet) + case ApplicationBindResponse: + addDefaultLDAPResponseDescriptions(packet) + case ApplicationUnbindRequest: + addRequestDescriptions(packet) + case ApplicationSearchRequest: + addRequestDescriptions(packet) + case ApplicationSearchResultEntry: + packet.Children[1].Children[0].Description = "Object Name" + packet.Children[1].Children[1].Description = "Attributes" + for _, child := range packet.Children[1].Children[1].Children { + child.Description = "Attribute" + child.Children[0].Description = "Attribute Name" + child.Children[1].Description = "Attribute Values" + for _, grandchild := range child.Children[1].Children { + grandchild.Description = "Attribute Value" + } + } + if len(packet.Children) == 3 { + addControlDescriptions(packet.Children[2]) + } + case ApplicationSearchResultDone: + addDefaultLDAPResponseDescriptions(packet) + case ApplicationModifyRequest: + addRequestDescriptions(packet) + case ApplicationModifyResponse: + case ApplicationAddRequest: + addRequestDescriptions(packet) + case ApplicationAddResponse: + case ApplicationDelRequest: + addRequestDescriptions(packet) + case ApplicationDelResponse: + case ApplicationModifyDNRequest: + addRequestDescriptions(packet) + case ApplicationModifyDNResponse: + case ApplicationCompareRequest: + addRequestDescriptions(packet) + case ApplicationCompareResponse: + case ApplicationAbandonRequest: + addRequestDescriptions(packet) + case ApplicationSearchResultReference: + case ApplicationExtendedRequest: + addRequestDescriptions(packet) + case ApplicationExtendedResponse: + } + + return nil +} + +func addControlDescriptions(packet *ber.Packet) { + packet.Description = "Controls" + for _, child := range packet.Children { + var value *ber.Packet + controlType := "" + child.Description = "Control" + switch len(child.Children) { + case 0: + // at least one child is required for control type + continue + + case 1: + // just type, no criticality or value + controlType = child.Children[0].Value.(string) + child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")" + + case 2: + controlType = child.Children[0].Value.(string) + child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")" + // Children[1] could be criticality or value (both are optional) + // duck-type on whether this is a boolean + if _, ok := child.Children[1].Value.(bool); ok { + child.Children[1].Description = "Criticality" + } else { + child.Children[1].Description = "Control Value" + value = child.Children[1] + } + + case 3: + // criticality and value present + controlType = child.Children[0].Value.(string) + child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")" + child.Children[1].Description = "Criticality" + child.Children[2].Description = "Control Value" + value = child.Children[2] + + default: + // more than 3 children is invalid + continue + } + if value == nil { + continue + } + switch controlType { + case ControlTypePaging: + value.Description += " (Paging)" + if value.Value != nil { + valueChildren := ber.DecodePacket(value.Data.Bytes()) + value.Data.Truncate(0) + value.Value = nil + valueChildren.Children[1].Value = valueChildren.Children[1].Data.Bytes() + value.AppendChild(valueChildren) + } + value.Children[0].Description = "Real Search Control Value" + value.Children[0].Children[0].Description = "Paging Size" + value.Children[0].Children[1].Description = "Cookie" + + case ControlTypeBeheraPasswordPolicy: + value.Description += " (Password Policy - 
Behera Draft)" + if value.Value != nil { + valueChildren := ber.DecodePacket(value.Data.Bytes()) + value.Data.Truncate(0) + value.Value = nil + value.AppendChild(valueChildren) + } + sequence := value.Children[0] + for _, child := range sequence.Children { + if child.Tag == 0 { + //Warning + warningPacket := child.Children[0] + packet := ber.DecodePacket(warningPacket.Data.Bytes()) + val, ok := packet.Value.(int64) + if ok { + if warningPacket.Tag == 0 { + //timeBeforeExpiration + value.Description += " (TimeBeforeExpiration)" + warningPacket.Value = val + } else if warningPacket.Tag == 1 { + //graceAuthNsRemaining + value.Description += " (GraceAuthNsRemaining)" + warningPacket.Value = val + } + } + } else if child.Tag == 1 { + // Error + packet := ber.DecodePacket(child.Data.Bytes()) + val, ok := packet.Value.(int8) + if !ok { + val = -1 + } + child.Description = "Error" + child.Value = val + } + } + } + } +} + +func addRequestDescriptions(packet *ber.Packet) { + packet.Description = "LDAP Request" + packet.Children[0].Description = "Message ID" + packet.Children[1].Description = ApplicationMap[uint8(packet.Children[1].Tag)] + if len(packet.Children) == 3 { + addControlDescriptions(packet.Children[2]) + } +} + +func addDefaultLDAPResponseDescriptions(packet *ber.Packet) { + resultCode, _ := getLDAPResultCode(packet) + packet.Children[1].Children[0].Description = "Result Code (" + LDAPResultCodeMap[resultCode] + ")" + packet.Children[1].Children[1].Description = "Matched DN" + packet.Children[1].Children[2].Description = "Error Message" + if len(packet.Children[1].Children) > 3 { + packet.Children[1].Children[3].Description = "Referral" + } + if len(packet.Children) == 3 { + addControlDescriptions(packet.Children[2]) + } +} + +// DebugBinaryFile reads and prints packets from the given filename +func DebugBinaryFile(fileName string) error { + file, err := ioutil.ReadFile(fileName) + if err != nil { + return NewError(ErrorDebugging, err) + } + ber.PrintBytes(os.Stdout, file, "") + packet := ber.DecodePacket(file) + addLDAPDescriptions(packet) + ber.PrintPacket(packet) + + return nil +} + +var hex = "0123456789abcdef" + +func mustEscape(c byte) bool { + return c > 0x7f || c == '(' || c == ')' || c == '\\' || c == '*' || c == 0 +} + +// EscapeFilter escapes from the provided LDAP filter string the special +// characters in the set `()*\` and those out of the range 0 < c < 0x80, +// as defined in RFC4515. +func EscapeFilter(filter string) string { + escape := 0 + for i := 0; i < len(filter); i++ { + if mustEscape(filter[i]) { + escape++ + } + } + if escape == 0 { + return filter + } + buf := make([]byte, len(filter)+escape*2) + for i, j := 0, 0; i < len(filter); i++ { + c := filter[i] + if mustEscape(c) { + buf[j+0] = '\\' + buf[j+1] = hex[c>>4] + buf[j+2] = hex[c&0xf] + j += 3 + } else { + buf[j] = c + j++ + } + } + return string(buf) +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/modify.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/modify.go new file mode 100644 index 00000000..e4ab6cef --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/modify.go @@ -0,0 +1,170 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
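A short sketch of EscapeFilter from ldap.go above, showing untrusted input being escaped before it is spliced into a filter string; the input value is made up and the import path is the upstream github.com/go-ldap/ldap package.

package main

import (
	"fmt"

	"github.com/go-ldap/ldap"
)

func main() {
	// Characters in the set ()*\ and bytes outside 0 < c < 0x80 are hex-escaped per RFC 4515.
	user := `ali*(admin)\`
	fmt.Printf("(uid=%s)\n", ldap.EscapeFilter(user))
	// Expected output: (uid=ali\2a\28admin\29\5c)
}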
+// +// File contains Modify functionality +// +// https://tools.ietf.org/html/rfc4511 +// +// ModifyRequest ::= [APPLICATION 6] SEQUENCE { +// object LDAPDN, +// changes SEQUENCE OF change SEQUENCE { +// operation ENUMERATED { +// add (0), +// delete (1), +// replace (2), +// ... }, +// modification PartialAttribute } } +// +// PartialAttribute ::= SEQUENCE { +// type AttributeDescription, +// vals SET OF value AttributeValue } +// +// AttributeDescription ::= LDAPString +// -- Constrained to <attributedescription> +// -- [RFC4512] +// +// AttributeValue ::= OCTET STRING +// + +package ldap + +import ( + "errors" + "log" + + "gopkg.in/asn1-ber.v1" +) + +// Change operation choices +const ( + AddAttribute = 0 + DeleteAttribute = 1 + ReplaceAttribute = 2 +) + +// PartialAttribute for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511 +type PartialAttribute struct { + // Type is the type of the partial attribute + Type string + // Vals are the values of the partial attribute + Vals []string +} + +func (p *PartialAttribute) encode() *ber.Packet { + seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "PartialAttribute") + seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, p.Type, "Type")) + set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue") + for _, value := range p.Vals { + set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals")) + } + seq.AppendChild(set) + return seq +} + +// ModifyRequest as defined in https://tools.ietf.org/html/rfc4511 +type ModifyRequest struct { + // DN is the distinguishedName of the directory entry to modify + DN string + // AddAttributes contain the attributes to add + AddAttributes []PartialAttribute + // DeleteAttributes contain the attributes to delete + DeleteAttributes []PartialAttribute + // ReplaceAttributes contain the attributes to replace + ReplaceAttributes []PartialAttribute +} + +// Add inserts the given attribute to the list of attributes to add +func (m *ModifyRequest) Add(attrType string, attrVals []string) { + m.AddAttributes = append(m.AddAttributes, PartialAttribute{Type: attrType, Vals: attrVals}) +} + +// Delete inserts the given attribute to the list of attributes to delete +func (m *ModifyRequest) Delete(attrType string, attrVals []string) { + m.DeleteAttributes = append(m.DeleteAttributes, PartialAttribute{Type: attrType, Vals: attrVals}) +} + +// Replace inserts the given attribute to the list of attributes to replace +func (m *ModifyRequest) Replace(attrType string, attrVals []string) { + m.ReplaceAttributes = append(m.ReplaceAttributes, PartialAttribute{Type: attrType, Vals: attrVals}) +} + +func (m ModifyRequest) encode() *ber.Packet { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyRequest, nil, "Modify Request") + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, m.DN, "DN")) + changes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Changes") + for _, attribute := range m.AddAttributes { + change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change") + change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(AddAttribute), "Operation")) + change.AppendChild(attribute.encode()) + changes.AppendChild(change) + } + for _, attribute := range m.DeleteAttributes { + change := 
ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change") + change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(DeleteAttribute), "Operation")) + change.AppendChild(attribute.encode()) + changes.AppendChild(change) + } + for _, attribute := range m.ReplaceAttributes { + change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change") + change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(ReplaceAttribute), "Operation")) + change.AppendChild(attribute.encode()) + changes.AppendChild(change) + } + request.AppendChild(changes) + return request +} + +// NewModifyRequest creates a modify request for the given DN +func NewModifyRequest( + dn string, +) *ModifyRequest { + return &ModifyRequest{ + DN: dn, + } +} + +// Modify performs the ModifyRequest +func (l *Conn) Modify(modifyRequest *ModifyRequest) error { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + packet.AppendChild(modifyRequest.encode()) + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return err + } + ber.PrintPacket(packet) + } + + if packet.Children[1].Tag == ApplicationModifyResponse { + resultCode, resultDescription := getLDAPResultCode(packet) + if resultCode != 0 { + return NewError(resultCode, errors.New(resultDescription)) + } + } else { + log.Printf("Unexpected Response: %d", packet.Children[1].Tag) + } + + l.Debug.Printf("%d: returning", msgCtx.id) + return nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/passwdmodify.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/passwdmodify.go new file mode 100644 index 00000000..7d8246fd --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/passwdmodify.go @@ -0,0 +1,148 @@ +// This file contains the password modify extended operation as specified in rfc 3062 +// +// https://tools.ietf.org/html/rfc3062 +// + +package ldap + +import ( + "errors" + "fmt" + + "gopkg.in/asn1-ber.v1" +) + +const ( + passwordModifyOID = "1.3.6.1.4.1.4203.1.11.1" +) + +// PasswordModifyRequest implements the Password Modify Extended Operation as defined in https://www.ietf.org/rfc/rfc3062.txt +type PasswordModifyRequest struct { + // UserIdentity is an optional string representation of the user associated with the request. + // This string may or may not be an LDAPDN [RFC2253]. 
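A usage sketch for the ModifyRequest builder and Conn.Modify defined in modify.go above; the DN, attribute values, helper name updateEntry, and the assumption of an already-bound *ldap.Conn are illustrative only.

// updateEntry is a hypothetical helper; l is assumed to be an already-bound connection.
func updateEntry(l *ldap.Conn) error {
	req := ldap.NewModifyRequest("cn=example,ou=people,dc=example,dc=com")
	req.Add("description", []string{"engineer"})         // add a value to the attribute
	req.Replace("mail", []string{"example@example.com"}) // replace all existing values
	req.Delete("telephoneNumber", []string{})            // an empty value set deletes the attribute
	return l.Modify(req)
}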
+ // If no UserIdentity field is present, the request acts upon the password of the user currently associated with the LDAP session + UserIdentity string + // OldPassword, if present, contains the user's current password + OldPassword string + // NewPassword, if present, contains the desired password for this user + NewPassword string +} + +// PasswordModifyResult holds the server response to a PasswordModifyRequest +type PasswordModifyResult struct { + // GeneratedPassword holds a password generated by the server, if present + GeneratedPassword string +} + +func (r *PasswordModifyRequest) encode() (*ber.Packet, error) { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Password Modify Extended Operation") + request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, passwordModifyOID, "Extended Request Name: Password Modify OID")) + extendedRequestValue := ber.Encode(ber.ClassContext, ber.TypePrimitive, 1, nil, "Extended Request Value: Password Modify Request") + passwordModifyRequestValue := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Password Modify Request") + if r.UserIdentity != "" { + passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, r.UserIdentity, "User Identity")) + } + if r.OldPassword != "" { + passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 1, r.OldPassword, "Old Password")) + } + if r.NewPassword != "" { + passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 2, r.NewPassword, "New Password")) + } + + extendedRequestValue.AppendChild(passwordModifyRequestValue) + request.AppendChild(extendedRequestValue) + + return request, nil +} + +// NewPasswordModifyRequest creates a new PasswordModifyRequest +// +// According to RFC 3062: +// userIdentity is a string representing the user associated with the request. +// This string may or may not be an LDAPDN (RFC 2253). +// If userIdentity is empty then the operation will act on the user associated +// with the session. +// +// oldPassword is the current user's password; it can be empty, or it can be +// required depending on the session user's access rights (usually an administrator +// can change a user's password without knowing the current one) and the +// password policy (see the pwdSafeModify password policy attribute). +// +// newPassword is the desired user's password. 
If empty the server can return +// an error or generate a new password that will be available in the +// PasswordModifyResult.GeneratedPassword +// +func NewPasswordModifyRequest(userIdentity string, oldPassword string, newPassword string) *PasswordModifyRequest { + return &PasswordModifyRequest{ + UserIdentity: userIdentity, + OldPassword: oldPassword, + NewPassword: newPassword, + } +} + +// PasswordModify performs the modification request +func (l *Conn) PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + + encodedPasswordModifyRequest, err := passwordModifyRequest.encode() + if err != nil { + return nil, err + } + packet.AppendChild(encodedPasswordModifyRequest) + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return nil, err + } + defer l.finishMessage(msgCtx) + + result := &PasswordModifyResult{} + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return nil, err + } + + if packet == nil { + return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve message")) + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return nil, err + } + ber.PrintPacket(packet) + } + + if packet.Children[1].Tag == ApplicationExtendedResponse { + resultCode, resultDescription := getLDAPResultCode(packet) + if resultCode != 0 { + return nil, NewError(resultCode, errors.New(resultDescription)) + } + } else { + return nil, NewError(ErrorUnexpectedResponse, fmt.Errorf("Unexpected Response: %d", packet.Children[1].Tag)) + } + + extendedResponse := packet.Children[1] + for _, child := range extendedResponse.Children { + if child.Tag == 11 { + passwordModifyResponseValue := ber.DecodePacket(child.Data.Bytes()) + if len(passwordModifyResponseValue.Children) == 1 { + if passwordModifyResponseValue.Children[0].Tag == 0 { + result.GeneratedPassword = ber.DecodeString(passwordModifyResponseValue.Children[0].Data.Bytes()) + } + } + } + } + + return result, nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/search.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/search.go new file mode 100644 index 00000000..2a99894c --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap/search.go @@ -0,0 +1,450 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// File contains Search functionality +// +// https://tools.ietf.org/html/rfc4511 +// +// SearchRequest ::= [APPLICATION 3] SEQUENCE { +// baseObject LDAPDN, +// scope ENUMERATED { +// baseObject (0), +// singleLevel (1), +// wholeSubtree (2), +// ... }, +// derefAliases ENUMERATED { +// neverDerefAliases (0), +// derefInSearching (1), +// derefFindingBaseObj (2), +// derefAlways (3) }, +// sizeLimit INTEGER (0 .. maxInt), +// timeLimit INTEGER (0 .. 
maxInt), +// typesOnly BOOLEAN, +// filter Filter, +// attributes AttributeSelection } +// +// AttributeSelection ::= SEQUENCE OF selector LDAPString +// -- The LDAPString is constrained to +// -- <attributeSelector> in Section 4.5.1.8 +// +// Filter ::= CHOICE { +// and [0] SET SIZE (1..MAX) OF filter Filter, +// or [1] SET SIZE (1..MAX) OF filter Filter, +// not [2] Filter, +// equalityMatch [3] AttributeValueAssertion, +// substrings [4] SubstringFilter, +// greaterOrEqual [5] AttributeValueAssertion, +// lessOrEqual [6] AttributeValueAssertion, +// present [7] AttributeDescription, +// approxMatch [8] AttributeValueAssertion, +// extensibleMatch [9] MatchingRuleAssertion, +// ... } +// +// SubstringFilter ::= SEQUENCE { +// type AttributeDescription, +// substrings SEQUENCE SIZE (1..MAX) OF substring CHOICE { +// initial [0] AssertionValue, -- can occur at most once +// any [1] AssertionValue, +// final [2] AssertionValue } -- can occur at most once +// } +// +// MatchingRuleAssertion ::= SEQUENCE { +// matchingRule [1] MatchingRuleId OPTIONAL, +// type [2] AttributeDescription OPTIONAL, +// matchValue [3] AssertionValue, +// dnAttributes [4] BOOLEAN DEFAULT FALSE } +// +// + +package ldap + +import ( + "errors" + "fmt" + "sort" + "strings" + + "gopkg.in/asn1-ber.v1" +) + +// scope choices +const ( + ScopeBaseObject = 0 + ScopeSingleLevel = 1 + ScopeWholeSubtree = 2 +) + +// ScopeMap contains human readable descriptions of scope choices +var ScopeMap = map[int]string{ + ScopeBaseObject: "Base Object", + ScopeSingleLevel: "Single Level", + ScopeWholeSubtree: "Whole Subtree", +} + +// derefAliases +const ( + NeverDerefAliases = 0 + DerefInSearching = 1 + DerefFindingBaseObj = 2 + DerefAlways = 3 +) + +// DerefMap contains human readable descriptions of derefAliases choices +var DerefMap = map[int]string{ + NeverDerefAliases: "NeverDerefAliases", + DerefInSearching: "DerefInSearching", + DerefFindingBaseObj: "DerefFindingBaseObj", + DerefAlways: "DerefAlways", +} + +// NewEntry returns an Entry object with the specified distinguished name and attribute key-value pairs. 
+// The map of attributes is accessed in alphabetical order of the keys in order to ensure that, for the +// same input map of attributes, the output entry will contain the same order of attributes +func NewEntry(dn string, attributes map[string][]string) *Entry { + var attributeNames []string + for attributeName := range attributes { + attributeNames = append(attributeNames, attributeName) + } + sort.Strings(attributeNames) + + var encodedAttributes []*EntryAttribute + for _, attributeName := range attributeNames { + encodedAttributes = append(encodedAttributes, NewEntryAttribute(attributeName, attributes[attributeName])) + } + return &Entry{ + DN: dn, + Attributes: encodedAttributes, + } +} + +// Entry represents a single search result entry +type Entry struct { + // DN is the distinguished name of the entry + DN string + // Attributes are the returned attributes for the entry + Attributes []*EntryAttribute +} + +// GetAttributeValues returns the values for the named attribute, or an empty list +func (e *Entry) GetAttributeValues(attribute string) []string { + for _, attr := range e.Attributes { + if attr.Name == attribute { + return attr.Values + } + } + return []string{} +} + +// GetRawAttributeValues returns the byte values for the named attribute, or an empty list +func (e *Entry) GetRawAttributeValues(attribute string) [][]byte { + for _, attr := range e.Attributes { + if attr.Name == attribute { + return attr.ByteValues + } + } + return [][]byte{} +} + +// GetAttributeValue returns the first value for the named attribute, or "" +func (e *Entry) GetAttributeValue(attribute string) string { + values := e.GetAttributeValues(attribute) + if len(values) == 0 { + return "" + } + return values[0] +} + +// GetRawAttributeValue returns the first value for the named attribute, or an empty slice +func (e *Entry) GetRawAttributeValue(attribute string) []byte { + values := e.GetRawAttributeValues(attribute) + if len(values) == 0 { + return []byte{} + } + return values[0] +} + +// Print outputs a human-readable description +func (e *Entry) Print() { + fmt.Printf("DN: %s\n", e.DN) + for _, attr := range e.Attributes { + attr.Print() + } +} + +// PrettyPrint outputs a human-readable description indenting +func (e *Entry) PrettyPrint(indent int) { + fmt.Printf("%sDN: %s\n", strings.Repeat(" ", indent), e.DN) + for _, attr := range e.Attributes { + attr.PrettyPrint(indent + 2) + } +} + +// NewEntryAttribute returns a new EntryAttribute with the desired key-value pair +func NewEntryAttribute(name string, values []string) *EntryAttribute { + var bytes [][]byte + for _, value := range values { + bytes = append(bytes, []byte(value)) + } + return &EntryAttribute{ + Name: name, + Values: values, + ByteValues: bytes, + } +} + +// EntryAttribute holds a single attribute +type EntryAttribute struct { + // Name is the name of the attribute + Name string + // Values contain the string values of the attribute + Values []string + // ByteValues contain the raw values of the attribute + ByteValues [][]byte +} + +// Print outputs a human-readable description +func (e *EntryAttribute) Print() { + fmt.Printf("%s: %s\n", e.Name, e.Values) +} + +// PrettyPrint outputs a human-readable description with indenting +func (e *EntryAttribute) PrettyPrint(indent int) { + fmt.Printf("%s%s: %s\n", strings.Repeat(" ", indent), e.Name, e.Values) +} + +// SearchResult holds the server's response to a search request +type SearchResult struct { + // Entries are the returned entries + Entries []*Entry + // Referrals are the returned 
referrals + Referrals []string + // Controls are the returned controls + Controls []Control +} + +// Print outputs a human-readable description +func (s *SearchResult) Print() { + for _, entry := range s.Entries { + entry.Print() + } +} + +// PrettyPrint outputs a human-readable description with indenting +func (s *SearchResult) PrettyPrint(indent int) { + for _, entry := range s.Entries { + entry.PrettyPrint(indent) + } +} + +// SearchRequest represents a search request to send to the server +type SearchRequest struct { + BaseDN string + Scope int + DerefAliases int + SizeLimit int + TimeLimit int + TypesOnly bool + Filter string + Attributes []string + Controls []Control +} + +func (s *SearchRequest) encode() (*ber.Packet, error) { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationSearchRequest, nil, "Search Request") + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, s.BaseDN, "Base DN")) + request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(s.Scope), "Scope")) + request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(s.DerefAliases), "Deref Aliases")) + request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(s.SizeLimit), "Size Limit")) + request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(s.TimeLimit), "Time Limit")) + request.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, s.TypesOnly, "Types Only")) + // compile and encode filter + filterPacket, err := CompileFilter(s.Filter) + if err != nil { + return nil, err + } + request.AppendChild(filterPacket) + // encode attributes + attributesPacket := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes") + for _, attribute := range s.Attributes { + attributesPacket.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute")) + } + request.AppendChild(attributesPacket) + return request, nil +} + +// NewSearchRequest creates a new search request +func NewSearchRequest( + BaseDN string, + Scope, DerefAliases, SizeLimit, TimeLimit int, + TypesOnly bool, + Filter string, + Attributes []string, + Controls []Control, +) *SearchRequest { + return &SearchRequest{ + BaseDN: BaseDN, + Scope: Scope, + DerefAliases: DerefAliases, + SizeLimit: SizeLimit, + TimeLimit: TimeLimit, + TypesOnly: TypesOnly, + Filter: Filter, + Attributes: Attributes, + Controls: Controls, + } +} + +// SearchWithPaging accepts a search request and desired page size in order to execute LDAP queries to fulfill the +// search request. All paged LDAP query responses will be buffered and the final result will be returned atomically. +// The following four cases are possible given the arguments: +// - given SearchRequest missing a control of type ControlTypePaging: we will add one with the desired paging size +// - given SearchRequest contains a control of type ControlTypePaging that isn't actually a ControlPaging: fail without issuing any queries +// - given SearchRequest contains a control of type ControlTypePaging with pagingSize equal to the size requested: no change to the search request +// - given SearchRequest contains a control of type ControlTypePaging with pagingSize not equal to the size requested: fail without issuing any queries +// A requested pagingSize of 0 is interpreted as no limit by LDAP servers. 
+func (l *Conn) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) { + var pagingControl *ControlPaging + + control := FindControl(searchRequest.Controls, ControlTypePaging) + if control == nil { + pagingControl = NewControlPaging(pagingSize) + searchRequest.Controls = append(searchRequest.Controls, pagingControl) + } else { + castControl, ok := control.(*ControlPaging) + if !ok { + return nil, fmt.Errorf("Expected paging control to be of type *ControlPaging, got %v", control) + } + if castControl.PagingSize != pagingSize { + return nil, fmt.Errorf("Paging size given in search request (%d) conflicts with size given in search call (%d)", castControl.PagingSize, pagingSize) + } + pagingControl = castControl + } + + searchResult := new(SearchResult) + for { + result, err := l.Search(searchRequest) + l.Debug.Printf("Looking for Paging Control...") + if err != nil { + return searchResult, err + } + if result == nil { + return searchResult, NewError(ErrorNetwork, errors.New("ldap: packet not received")) + } + + for _, entry := range result.Entries { + searchResult.Entries = append(searchResult.Entries, entry) + } + for _, referral := range result.Referrals { + searchResult.Referrals = append(searchResult.Referrals, referral) + } + for _, control := range result.Controls { + searchResult.Controls = append(searchResult.Controls, control) + } + + l.Debug.Printf("Looking for Paging Control...") + pagingResult := FindControl(result.Controls, ControlTypePaging) + if pagingResult == nil { + pagingControl = nil + l.Debug.Printf("Could not find paging control. Breaking...") + break + } + + cookie := pagingResult.(*ControlPaging).Cookie + if len(cookie) == 0 { + pagingControl = nil + l.Debug.Printf("Could not find cookie. Breaking...") + break + } + pagingControl.SetCookie(cookie) + } + + if pagingControl != nil { + l.Debug.Printf("Abandoning Paging...") + pagingControl.PagingSize = 0 + l.Search(searchRequest) + } + + return searchResult, nil +} + +// Search performs the given search request +func (l *Conn) Search(searchRequest *SearchRequest) (*SearchResult, error) { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + // encode search request + encodedSearchRequest, err := searchRequest.encode() + if err != nil { + return nil, err + } + packet.AppendChild(encodedSearchRequest) + // encode search controls + if searchRequest.Controls != nil { + packet.AppendChild(encodeControls(searchRequest.Controls)) + } + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return nil, err + } + defer l.finishMessage(msgCtx) + + result := &SearchResult{ + Entries: make([]*Entry, 0), + Referrals: make([]string, 0), + Controls: make([]Control, 0)} + + foundSearchResultDone := false + for !foundSearchResultDone { + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return nil, err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return nil, err + } + ber.PrintPacket(packet) + } + + switch packet.Children[1].Tag { + case 4: + entry := new(Entry) + entry.DN = 
packet.Children[1].Children[0].Value.(string) + for _, child := range packet.Children[1].Children[1].Children { + attr := new(EntryAttribute) + attr.Name = child.Children[0].Value.(string) + for _, value := range child.Children[1].Children { + attr.Values = append(attr.Values, value.Value.(string)) + attr.ByteValues = append(attr.ByteValues, value.ByteValue) + } + entry.Attributes = append(entry.Attributes, attr) + } + result.Entries = append(result.Entries, entry) + case 5: + resultCode, resultDescription := getLDAPResultCode(packet) + if resultCode != 0 { + return result, NewError(resultCode, errors.New(resultDescription)) + } + if len(packet.Children) == 3 { + for _, child := range packet.Children[2].Children { + result.Controls = append(result.Controls, DecodeControl(child)) + } + } + foundSearchResultDone = true + case 19: + result.Referrals = append(result.Referrals, packet.Children[1].Children[0].Value.(string)) + } + } + l.Debug.Printf("%d: returning", msgCtx.id) + return result, nil +} diff --git a/vendor/github.com/mattermost/platform/vendor/golang.org/x/crypto/blowfish/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/LICENSE.txt index ead98cf0..ead98cf0 100644 --- a/vendor/github.com/mattermost/platform/vendor/golang.org/x/crypto/blowfish/LICENSE.txt +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/LICENSE.txt diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/dce.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/dce.go index 50a0f2d0..50a0f2d0 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/dce.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/dce.go diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/doc.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/doc.go index d8bd013e..d8bd013e 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/doc.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/doc.go diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/hash.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/hash.go index a0420c1e..a0420c1e 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/hash.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/hash.go diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/marshal.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/marshal.go index 6621dd54..6621dd54 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/marshal.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/marshal.go diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/node.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/node.go index 42d60da8..42d60da8 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/node.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/node.go diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/sql.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/sql.go index d015bfd1..d015bfd1 
100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/sql.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/sql.go diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/time.go index eedf2421..eedf2421 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/time.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/time.go diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/util.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/util.go index fc8e052c..fc8e052c 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/util.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/util.go diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/uuid.go index 7c643cf0..7c643cf0 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/uuid.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/uuid.go diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/version1.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/version1.go index 0127eacf..0127eacf 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/version1.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/version1.go diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/version4.go index b3d4a368..b3d4a368 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/pborman/uuid/version4.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid/version4.go diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/alecthomas/log4go/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/bcrypt/LICENSE.txt index e5875711..ead98cf0 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/alecthomas/log4go/LICENSE.txt +++ b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/bcrypt/LICENSE.txt @@ -1,32 +1,32 @@ Mattermost Licensing -SOFTWARE LICENSING +SOFTWARE LICENSING -You are licensed to use compiled versions of the Mattermost platform produced by Mattermost, Inc. under an MIT LICENSE +You are licensed to use compiled versions of the Mattermost platform produced by Mattermost, Inc. under an MIT LICENSE - See MIT-COMPILED-LICENSE.md included in compiled versions for details You may be licensed to use source code to create compiled versions not produced by Mattermost, Inc. in one of two ways: -1. Under the Free Software Foundation’s GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or -2. Under a commercial license available from Mattermost, Inc. by contacting commercial@mattermost.com +1. Under the Free Software Foundation’s GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or +2. Under a commercial license available from Mattermost, Inc. 
by contacting commercial@mattermost.com -You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/, model/, +You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/, model/, webapp/client, webapp/fonts, webapp/i18n, webapp/images and all subdirectories thereof) under the Apache License v2.0. -We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not +We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not link to the Mattermost Platform directly, but exclusively uses the Mattermost Admin Tools and Configuration Files, and -(b) you have not modified, added to or adapted the source code of Mattermost in a way that results in the creation of +(b) you have not modified, added to or adapted the source code of Mattermost in a way that results in the creation of a “modified version” or “work based on” Mattermost as these terms are defined in the AGPL v3.0 license. MATTERMOST TRADEMARK GUIDELINES -Your use of the mark Mattermost is subject to Mattermost, Inc's prior written approval and our organization’s Trademark -Standards of Use at http://www.mattermost.org/trademark-standards-of-use/. For trademark approval or any questions -you have about using these trademarks, please email trademark@mattermost.com +Your use of the mark Mattermost is subject to Mattermost, Inc's prior written approval and our organization’s Trademark +Standards of Use at http://www.mattermost.org/trademark-standards-of-use/. For trademark approval or any questions +you have about using these trademarks, please email trademark@mattermost.com ------------------------------------------------------------------------------------------------------------------------------ - + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/vendor/github.com/mattermost/platform/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/bcrypt/base64.go index fc311609..fc311609 100644 --- a/vendor/github.com/mattermost/platform/vendor/golang.org/x/crypto/bcrypt/base64.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/bcrypt/base64.go diff --git a/vendor/github.com/mattermost/platform/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/bcrypt/bcrypt.go index 202fa8af..aeb73f81 100644 --- a/vendor/github.com/mattermost/platform/vendor/golang.org/x/crypto/bcrypt/bcrypt.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -241,11 +241,11 @@ func (p *hashed) Hash() []byte { n = 3 } arr[n] = '$' - n += 1 + n++ copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) n += 2 arr[n] = '$' - n += 1 + n++ copy(arr[n:], p.salt) n += encodedSaltSize copy(arr[n:], p.hash) diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/LICENSE.txt new file mode 100644 index 00000000..ead98cf0 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/LICENSE.txt @@ -0,0 +1,897 @@ +Mattermost Licensing + +SOFTWARE LICENSING + +You are licensed to use compiled versions of the Mattermost platform produced by Mattermost, Inc. 
under an MIT LICENSE + +- See MIT-COMPILED-LICENSE.md included in compiled versions for details + +You may be licensed to use source code to create compiled versions not produced by Mattermost, Inc. in one of two ways: + +1. Under the Free Software Foundation’s GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or +2. Under a commercial license available from Mattermost, Inc. by contacting commercial@mattermost.com + +You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/, model/, +webapp/client, webapp/fonts, webapp/i18n, webapp/images and all subdirectories thereof) under the Apache License v2.0. + +We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not +link to the Mattermost Platform directly, but exclusively uses the Mattermost Admin Tools and Configuration Files, and +(b) you have not modified, added to or adapted the source code of Mattermost in a way that results in the creation of +a “modified version” or “work based on” Mattermost as these terms are defined in the AGPL v3.0 license. + +MATTERMOST TRADEMARK GUIDELINES + +Your use of the mark Mattermost is subject to Mattermost, Inc's prior written approval and our organization’s Trademark +Standards of Use at http://www.mattermost.org/trademark-standards-of-use/. For trademark approval or any questions +you have about using these trademarks, please email trademark@mattermost.com + +------------------------------------------------------------------------------------------------------------------------------ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------------------------------------------------------------------ + +The software is released under the terms of the GNU Affero General Public +License, version 3. + + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. 
+ + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+ + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<http://www.gnu.org/licenses/>. 
diff --git a/vendor/github.com/mattermost/platform/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/block.go index 9d80f195..9d80f195 100644 --- a/vendor/github.com/mattermost/platform/vendor/golang.org/x/crypto/blowfish/block.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/block.go diff --git a/vendor/github.com/mattermost/platform/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/cipher.go index 2641dadd..2641dadd 100644 --- a/vendor/github.com/mattermost/platform/vendor/golang.org/x/crypto/blowfish/cipher.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/cipher.go diff --git a/vendor/github.com/mattermost/platform/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/const.go index d0407759..d0407759 100644 --- a/vendor/github.com/mattermost/platform/vendor/golang.org/x/crypto/blowfish/const.go +++ b/vendor/github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish/const.go diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/LICENSE.txt new file mode 100644 index 00000000..ead98cf0 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/LICENSE.txt @@ -0,0 +1,897 @@ +Mattermost Licensing + +SOFTWARE LICENSING + +You are licensed to use compiled versions of the Mattermost platform produced by Mattermost, Inc. under an MIT LICENSE + +- See MIT-COMPILED-LICENSE.md included in compiled versions for details + +You may be licensed to use source code to create compiled versions not produced by Mattermost, Inc. in one of two ways: + +1. Under the Free Software Foundation’s GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or +2. Under a commercial license available from Mattermost, Inc. by contacting commercial@mattermost.com + +You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/, model/, +webapp/client, webapp/fonts, webapp/i18n, webapp/images and all subdirectories thereof) under the Apache License v2.0. + +We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not +link to the Mattermost Platform directly, but exclusively uses the Mattermost Admin Tools and Configuration Files, and +(b) you have not modified, added to or adapted the source code of Mattermost in a way that results in the creation of +a “modified version” or “work based on” Mattermost as these terms are defined in the AGPL v3.0 license. + +MATTERMOST TRADEMARK GUIDELINES + +Your use of the mark Mattermost is subject to Mattermost, Inc's prior written approval and our organization’s Trademark +Standards of Use at http://www.mattermost.org/trademark-standards-of-use/. For trademark approval or any questions +you have about using these trademarks, please email trademark@mattermost.com + +------------------------------------------------------------------------------------------------------------------------------ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +------------------------------------------------------------------------------ + +The software is released under the terms of the GNU Affero General Public +License, version 3. + + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. 
+ + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. 
Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. 
+Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). 
+ + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". 
+ + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. 
+ + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. 
+ + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<http://www.gnu.org/licenses/>. diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/ber.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/ber.go new file mode 100644 index 00000000..25cc921b --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/ber.go @@ -0,0 +1,504 @@ +package ber + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "reflect" +) + +type Packet struct { + Identifier + Value interface{} + ByteValue []byte + Data *bytes.Buffer + Children []*Packet + Description string +} + +type Identifier struct { + ClassType Class + TagType Type + Tag Tag +} + +type Tag uint64 + +const ( + TagEOC Tag = 0x00 + TagBoolean Tag = 0x01 + TagInteger Tag = 0x02 + TagBitString Tag = 0x03 + TagOctetString Tag = 0x04 + TagNULL Tag = 0x05 + TagObjectIdentifier Tag = 0x06 + TagObjectDescriptor Tag = 0x07 + TagExternal Tag = 0x08 + TagRealFloat Tag = 0x09 + TagEnumerated Tag = 0x0a + TagEmbeddedPDV Tag = 0x0b + TagUTF8String Tag = 0x0c + TagRelativeOID Tag = 0x0d + TagSequence Tag = 0x10 + TagSet Tag = 0x11 + TagNumericString Tag = 0x12 + TagPrintableString Tag = 0x13 + TagT61String Tag = 0x14 + TagVideotexString Tag = 0x15 + TagIA5String Tag = 0x16 + TagUTCTime Tag = 0x17 + TagGeneralizedTime Tag = 0x18 + TagGraphicString Tag = 0x19 + TagVisibleString Tag = 0x1a + TagGeneralString Tag = 0x1b + TagUniversalString Tag = 0x1c + TagCharacterString Tag = 0x1d + TagBMPString Tag = 0x1e + TagBitmask Tag = 0x1f // xxx11111b + + // HighTag indicates the start of a high-tag byte sequence + HighTag Tag = 0x1f // xxx11111b + // HighTagContinueBitmask indicates the high-tag byte sequence should continue + HighTagContinueBitmask Tag = 0x80 // 10000000b + // HighTagValueBitmask obtains the tag value from a high-tag byte sequence byte + HighTagValueBitmask Tag = 0x7f // 01111111b +) + +const ( + // LengthLongFormBitmask is the mask to apply to the length byte to see if a long-form byte sequence is used + LengthLongFormBitmask = 0x80 + // LengthValueBitmask is the mask to apply to the length byte to get the number of bytes in the long-form byte sequence + LengthValueBitmask = 0x7f + + // LengthIndefinite is returned from readLength to indicate an indefinite length + LengthIndefinite = -1 +) + +var tagMap = map[Tag]string{ + TagEOC: "EOC (End-of-Content)", + TagBoolean: "Boolean", + TagInteger: "Integer", + TagBitString: "Bit String", + TagOctetString: "Octet String", + TagNULL: "NULL", + TagObjectIdentifier: "Object Identifier", + TagObjectDescriptor: "Object Descriptor", + TagExternal: "External", + TagRealFloat: "Real (float)", + TagEnumerated: "Enumerated", + TagEmbeddedPDV: "Embedded PDV", + TagUTF8String: "UTF8 String", + TagRelativeOID: "Relative-OID", + TagSequence: "Sequence and Sequence of", + TagSet: "Set and Set OF", + TagNumericString: "Numeric String", + TagPrintableString: "Printable String", + TagT61String: "T61 String", + TagVideotexString: "Videotex String", + TagIA5String: "IA5 String", + TagUTCTime: "UTC Time", + TagGeneralizedTime: "Generalized Time", + TagGraphicString: "Graphic String", + TagVisibleString: "Visible String", + TagGeneralString: "General String", + TagUniversalString: "Universal String", + TagCharacterString: "Character String", + TagBMPString: 
"BMP String", +} + +type Class uint8 + +const ( + ClassUniversal Class = 0 // 00xxxxxxb + ClassApplication Class = 64 // 01xxxxxxb + ClassContext Class = 128 // 10xxxxxxb + ClassPrivate Class = 192 // 11xxxxxxb + ClassBitmask Class = 192 // 11xxxxxxb +) + +var ClassMap = map[Class]string{ + ClassUniversal: "Universal", + ClassApplication: "Application", + ClassContext: "Context", + ClassPrivate: "Private", +} + +type Type uint8 + +const ( + TypePrimitive Type = 0 // xx0xxxxxb + TypeConstructed Type = 32 // xx1xxxxxb + TypeBitmask Type = 32 // xx1xxxxxb +) + +var TypeMap = map[Type]string{ + TypePrimitive: "Primitive", + TypeConstructed: "Constructed", +} + +var Debug bool = false + +func PrintBytes(out io.Writer, buf []byte, indent string) { + data_lines := make([]string, (len(buf)/30)+1) + num_lines := make([]string, (len(buf)/30)+1) + + for i, b := range buf { + data_lines[i/30] += fmt.Sprintf("%02x ", b) + num_lines[i/30] += fmt.Sprintf("%02d ", (i+1)%100) + } + + for i := 0; i < len(data_lines); i++ { + out.Write([]byte(indent + data_lines[i] + "\n")) + out.Write([]byte(indent + num_lines[i] + "\n\n")) + } +} + +func PrintPacket(p *Packet) { + printPacket(os.Stdout, p, 0, false) +} + +func printPacket(out io.Writer, p *Packet, indent int, printBytes bool) { + indent_str := "" + + for len(indent_str) != indent { + indent_str += " " + } + + class_str := ClassMap[p.ClassType] + + tagtype_str := TypeMap[p.TagType] + + tag_str := fmt.Sprintf("0x%02X", p.Tag) + + if p.ClassType == ClassUniversal { + tag_str = tagMap[p.Tag] + } + + value := fmt.Sprint(p.Value) + description := "" + + if p.Description != "" { + description = p.Description + ": " + } + + fmt.Fprintf(out, "%s%s(%s, %s, %s) Len=%d %q\n", indent_str, description, class_str, tagtype_str, tag_str, p.Data.Len(), value) + + if printBytes { + PrintBytes(out, p.Bytes(), indent_str) + } + + for _, child := range p.Children { + printPacket(out, child, indent+1, printBytes) + } +} + +// ReadPacket reads a single Packet from the reader +func ReadPacket(reader io.Reader) (*Packet, error) { + p, _, err := readPacket(reader) + if err != nil { + return nil, err + } + return p, nil +} + +func DecodeString(data []byte) string { + return string(data) +} + +func parseInt64(bytes []byte) (ret int64, err error) { + if len(bytes) > 8 { + // We'll overflow an int64 in this case. + err = fmt.Errorf("integer too large") + return + } + for bytesRead := 0; bytesRead < len(bytes); bytesRead++ { + ret <<= 8 + ret |= int64(bytes[bytesRead]) + } + + // Shift up and down in order to sign extend the result. + ret <<= 64 - uint8(len(bytes))*8 + ret >>= 64 - uint8(len(bytes))*8 + return +} + +func encodeInteger(i int64) []byte { + n := int64Length(i) + out := make([]byte, n) + + var j int + for ; n > 0; n-- { + out[j] = (byte(i >> uint((n-1)*8))) + j++ + } + + return out +} + +func int64Length(i int64) (numBytes int) { + numBytes = 1 + + for i > 127 { + numBytes++ + i >>= 8 + } + + for i < -128 { + numBytes++ + i >>= 8 + } + + return +} + +// DecodePacket decodes the given bytes into a single Packet +// If a decode error is encountered, nil is returned. 
+func DecodePacket(data []byte) *Packet { + p, _, _ := readPacket(bytes.NewBuffer(data)) + + return p +} + +// DecodePacketErr decodes the given bytes into a single Packet +// If a decode error is encountered, nil is returned +func DecodePacketErr(data []byte) (*Packet, error) { + p, _, err := readPacket(bytes.NewBuffer(data)) + if err != nil { + return nil, err + } + return p, nil +} + +// readPacket reads a single Packet from the reader, returning the number of bytes read +func readPacket(reader io.Reader) (*Packet, int, error) { + identifier, length, read, err := readHeader(reader) + if err != nil { + return nil, read, err + } + + p := &Packet{ + Identifier: identifier, + } + + p.Data = new(bytes.Buffer) + p.Children = make([]*Packet, 0, 2) + p.Value = nil + + if p.TagType == TypeConstructed { + // TODO: if universal, ensure tag type is allowed to be constructed + + // Track how much content we've read + contentRead := 0 + for { + if length != LengthIndefinite { + // End if we've read what we've been told to + if contentRead == length { + break + } + // Detect if a packet boundary didn't fall on the expected length + if contentRead > length { + return nil, read, fmt.Errorf("expected to read %d bytes, read %d", length, contentRead) + } + } + + // Read the next packet + child, r, err := readPacket(reader) + if err != nil { + return nil, read, err + } + contentRead += r + read += r + + // Test is this is the EOC marker for our packet + if isEOCPacket(child) { + if length == LengthIndefinite { + break + } + return nil, read, errors.New("eoc child not allowed with definite length") + } + + // Append and continue + p.AppendChild(child) + } + return p, read, nil + } + + if length == LengthIndefinite { + return nil, read, errors.New("indefinite length used with primitive type") + } + + // Read definite-length content + content := make([]byte, length, length) + if length > 0 { + _, err := io.ReadFull(reader, content) + if err != nil { + if err == io.EOF { + return nil, read, io.ErrUnexpectedEOF + } + return nil, read, err + } + read += length + } + + if p.ClassType == ClassUniversal { + p.Data.Write(content) + p.ByteValue = content + + switch p.Tag { + case TagEOC: + case TagBoolean: + val, _ := parseInt64(content) + + p.Value = val != 0 + case TagInteger: + p.Value, _ = parseInt64(content) + case TagBitString: + case TagOctetString: + // the actual string encoding is not known here + // (e.g. for LDAP content is already an UTF8-encoded + // string). 
Return the data without further processing + p.Value = DecodeString(content) + case TagNULL: + case TagObjectIdentifier: + case TagObjectDescriptor: + case TagExternal: + case TagRealFloat: + case TagEnumerated: + p.Value, _ = parseInt64(content) + case TagEmbeddedPDV: + case TagUTF8String: + p.Value = DecodeString(content) + case TagRelativeOID: + case TagSequence: + case TagSet: + case TagNumericString: + case TagPrintableString: + p.Value = DecodeString(content) + case TagT61String: + case TagVideotexString: + case TagIA5String: + case TagUTCTime: + case TagGeneralizedTime: + case TagGraphicString: + case TagVisibleString: + case TagGeneralString: + case TagUniversalString: + case TagCharacterString: + case TagBMPString: + } + } else { + p.Data.Write(content) + } + + return p, read, nil +} + +func (p *Packet) Bytes() []byte { + var out bytes.Buffer + + out.Write(encodeIdentifier(p.Identifier)) + out.Write(encodeLength(p.Data.Len())) + out.Write(p.Data.Bytes()) + + return out.Bytes() +} + +func (p *Packet) AppendChild(child *Packet) { + p.Data.Write(child.Bytes()) + p.Children = append(p.Children, child) +} + +func Encode(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet { + p := new(Packet) + + p.ClassType = ClassType + p.TagType = TagType + p.Tag = Tag + p.Data = new(bytes.Buffer) + + p.Children = make([]*Packet, 0, 2) + + p.Value = Value + p.Description = Description + + if Value != nil { + v := reflect.ValueOf(Value) + + if ClassType == ClassUniversal { + switch Tag { + case TagOctetString: + sv, ok := v.Interface().(string) + + if ok { + p.Data.Write([]byte(sv)) + } + } + } + } + + return p +} + +func NewSequence(Description string) *Packet { + return Encode(ClassUniversal, TypeConstructed, TagSequence, nil, Description) +} + +func NewBoolean(ClassType Class, TagType Type, Tag Tag, Value bool, Description string) *Packet { + intValue := int64(0) + + if Value { + intValue = 1 + } + + p := Encode(ClassType, TagType, Tag, nil, Description) + + p.Value = Value + p.Data.Write(encodeInteger(intValue)) + + return p +} + +func NewInteger(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet { + p := Encode(ClassType, TagType, Tag, nil, Description) + + p.Value = Value + switch v := Value.(type) { + case int: + p.Data.Write(encodeInteger(int64(v))) + case uint: + p.Data.Write(encodeInteger(int64(v))) + case int64: + p.Data.Write(encodeInteger(v)) + case uint64: + // TODO : check range or add encodeUInt... + p.Data.Write(encodeInteger(int64(v))) + case int32: + p.Data.Write(encodeInteger(int64(v))) + case uint32: + p.Data.Write(encodeInteger(int64(v))) + case int16: + p.Data.Write(encodeInteger(int64(v))) + case uint16: + p.Data.Write(encodeInteger(int64(v))) + case int8: + p.Data.Write(encodeInteger(int64(v))) + case uint8: + p.Data.Write(encodeInteger(int64(v))) + default: + // TODO : add support for big.Int ? 
+ panic(fmt.Sprintf("Invalid type %T, expected {u|}int{64|32|16|8}", v)) + } + + return p +} + +func NewString(ClassType Class, TagType Type, Tag Tag, Value, Description string) *Packet { + p := Encode(ClassType, TagType, Tag, nil, Description) + + p.Value = Value + p.Data.Write([]byte(Value)) + + return p +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/content_int.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/content_int.go new file mode 100644 index 00000000..1858b74b --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/content_int.go @@ -0,0 +1,25 @@ +package ber + +func encodeUnsignedInteger(i uint64) []byte { + n := uint64Length(i) + out := make([]byte, n) + + var j int + for ; n > 0; n-- { + out[j] = (byte(i >> uint((n-1)*8))) + j++ + } + + return out +} + +func uint64Length(i uint64) (numBytes int) { + numBytes = 1 + + for i > 255 { + numBytes++ + i >>= 8 + } + + return +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/header.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/header.go new file mode 100644 index 00000000..123744e9 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/header.go @@ -0,0 +1,29 @@ +package ber + +import ( + "errors" + "io" +) + +func readHeader(reader io.Reader) (identifier Identifier, length int, read int, err error) { + if i, c, err := readIdentifier(reader); err != nil { + return Identifier{}, 0, read, err + } else { + identifier = i + read += c + } + + if l, c, err := readLength(reader); err != nil { + return Identifier{}, 0, read, err + } else { + length = l + read += c + } + + // Validate length type with identifier (x.600, 8.1.3.2.a) + if length == LengthIndefinite && identifier.TagType == TypePrimitive { + return Identifier{}, 0, read, errors.New("indefinite length used with primitive type") + } + + return identifier, length, read, nil +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/identifier.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/identifier.go new file mode 100644 index 00000000..f7672a84 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/identifier.go @@ -0,0 +1,103 @@ +package ber + +import ( + "errors" + "fmt" + "io" + "math" +) + +func readIdentifier(reader io.Reader) (Identifier, int, error) { + identifier := Identifier{} + read := 0 + + // identifier byte + b, err := readByte(reader) + if err != nil { + if Debug { + fmt.Printf("error reading identifier byte: %v\n", err) + } + return Identifier{}, read, err + } + read++ + + identifier.ClassType = Class(b) & ClassBitmask + identifier.TagType = Type(b) & TypeBitmask + + if tag := Tag(b) & TagBitmask; tag != HighTag { + // short-form tag + identifier.Tag = tag + return identifier, read, nil + } + + // high-tag-number tag + tagBytes := 0 + for { + b, err := readByte(reader) + if err != nil { + if Debug { + fmt.Printf("error reading high-tag-number tag byte %d: %v\n", tagBytes, err) + } + return Identifier{}, read, err + } + tagBytes++ + read++ + + // Lowest 7 bits get appended to the tag value (x.690, 8.1.2.4.2.b) + identifier.Tag <<= 7 + identifier.Tag |= Tag(b) & HighTagValueBitmask + + // First byte may not be all zeros (x.690, 8.1.2.4.2.c) + if tagBytes == 1 && identifier.Tag == 0 { + return Identifier{}, read, errors.New("invalid first 
high-tag-number tag byte") + } + // Overflow of int64 + // TODO: support big int tags? + if tagBytes > 9 { + return Identifier{}, read, errors.New("high-tag-number tag overflow") + } + + // Top bit of 0 means this is the last byte in the high-tag-number tag (x.690, 8.1.2.4.2.a) + if Tag(b)&HighTagContinueBitmask == 0 { + break + } + } + + return identifier, read, nil +} + +func encodeIdentifier(identifier Identifier) []byte { + b := []byte{0x0} + b[0] |= byte(identifier.ClassType) + b[0] |= byte(identifier.TagType) + + if identifier.Tag < HighTag { + // Short-form + b[0] |= byte(identifier.Tag) + } else { + // high-tag-number + b[0] |= byte(HighTag) + + tag := identifier.Tag + + highBit := uint(63) + for { + if tag&(1<<highBit) != 0 { + break + } + highBit-- + } + + tagBytes := int(math.Ceil(float64(highBit) / 7.0)) + for i := tagBytes - 1; i >= 0; i-- { + offset := uint(i) * 7 + mask := Tag(0x7f) << offset + tagByte := (tag & mask) >> offset + if i != 0 { + tagByte |= 0x80 + } + b = append(b, byte(tagByte)) + } + } + return b +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/length.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/length.go new file mode 100644 index 00000000..750e8f44 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/length.go @@ -0,0 +1,81 @@ +package ber + +import ( + "errors" + "fmt" + "io" +) + +func readLength(reader io.Reader) (length int, read int, err error) { + // length byte + b, err := readByte(reader) + if err != nil { + if Debug { + fmt.Printf("error reading length byte: %v\n", err) + } + return 0, 0, err + } + read++ + + switch { + case b == 0xFF: + // Invalid 0xFF (x.600, 8.1.3.5.c) + return 0, read, errors.New("invalid length byte 0xff") + + case b == LengthLongFormBitmask: + // Indefinite form, we have to decode packets until we encounter an EOC packet (x.600, 8.1.3.6) + length = LengthIndefinite + + case b&LengthLongFormBitmask == 0: + // Short definite form, extract the length from the bottom 7 bits (x.600, 8.1.3.4) + length = int(b) & LengthValueBitmask + + case b&LengthLongFormBitmask != 0: + // Long definite form, extract the number of length bytes to follow from the bottom 7 bits (x.600, 8.1.3.5.b) + lengthBytes := int(b) & LengthValueBitmask + // Protect against overflow + // TODO: support big int length? + if lengthBytes > 8 { + return 0, read, errors.New("long-form length overflow") + } + + // Accumulate into a 64-bit variable + var length64 int64 + for i := 0; i < lengthBytes; i++ { + b, err = readByte(reader) + if err != nil { + if Debug { + fmt.Printf("error reading long-form length byte %d: %v\n", i, err) + } + return 0, read, err + } + read++ + + // x.600, 8.1.3.5 + length64 <<= 8 + length64 |= int64(b) + } + + // Cast to a platform-specific integer + length = int(length64) + // Ensure we didn't overflow + if int64(length) != length64 { + return 0, read, errors.New("long-form length overflow") + } + + default: + return 0, read, errors.New("invalid length byte") + } + + return length, read, nil +} + +func encodeLength(length int) []byte { + length_bytes := encodeUnsignedInteger(uint64(length)) + if length > 127 || len(length_bytes) > 1 { + longFormBytes := []byte{(LengthLongFormBitmask | byte(len(length_bytes)))} + longFormBytes = append(longFormBytes, length_bytes...) 
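+ // longFormBytes now holds the long-form length: a prefix octet
+ // (LengthLongFormBitmask | byte count) followed by the big-endian length octets.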
+ length_bytes = longFormBytes + } + return length_bytes +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/util.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/util.go new file mode 100644 index 00000000..3e56b66c --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1/util.go @@ -0,0 +1,24 @@ +package ber + +import "io" + +func readByte(reader io.Reader) (byte, error) { + bytes := make([]byte, 1, 1) + _, err := io.ReadFull(reader, bytes) + if err != nil { + if err == io.EOF { + return 0, io.ErrUnexpectedEOF + } + return 0, err + } + return bytes[0], nil +} + +func isEOCPacket(p *Packet) bool { + return p != nil && + p.Tag == TagEOC && + p.ClassType == ClassUniversal && + p.TagType == TypePrimitive && + len(p.ByteValue) == 0 && + len(p.Children) == 0 +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/LICENSE.txt b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/LICENSE.txt new file mode 100644 index 00000000..ead98cf0 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/LICENSE.txt @@ -0,0 +1,897 @@ +Mattermost Licensing + +SOFTWARE LICENSING + +You are licensed to use compiled versions of the Mattermost platform produced by Mattermost, Inc. under an MIT LICENSE + +- See MIT-COMPILED-LICENSE.md included in compiled versions for details + +You may be licensed to use source code to create compiled versions not produced by Mattermost, Inc. in one of two ways: + +1. Under the Free Software Foundation’s GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or +2. Under a commercial license available from Mattermost, Inc. by contacting commercial@mattermost.com + +You are licensed to use the source code in Admin Tools and Configuration Files (templates/, config/, model/, +webapp/client, webapp/fonts, webapp/i18n, webapp/images and all subdirectories thereof) under the Apache License v2.0. + +We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not +link to the Mattermost Platform directly, but exclusively uses the Mattermost Admin Tools and Configuration Files, and +(b) you have not modified, added to or adapted the source code of Mattermost in a way that results in the creation of +a “modified version” or “work based on” Mattermost as these terms are defined in the AGPL v3.0 license. + +MATTERMOST TRADEMARK GUIDELINES + +Your use of the mark Mattermost is subject to Mattermost, Inc's prior written approval and our organization’s Trademark +Standards of Use at http://www.mattermost.org/trademark-standards-of-use/. For trademark approval or any questions +you have about using these trademarks, please email trademark@mattermost.com + +------------------------------------------------------------------------------------------------------------------------------ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +------------------------------------------------------------------------------ + +The software is released under the terms of the GNU Affero General Public +License, version 3. + + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. 
+ + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. 
Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. 
+Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). 
+ + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". 
+ + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. 
+ + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. 
+ + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<http://www.gnu.org/licenses/>. diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/apic.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 00000000..95ec014e --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,742 @@ +package yaml + +import ( + "io" + "os" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// File read handler. +func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_file.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_file_read_handler + parser.input_file = file +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + return true +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. 
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// File write handler. +func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_file.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_file_write_handler + emitter.output_file = file +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? 
octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } + return true +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } + return true +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } + return true +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } + return true +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } + return true +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. +// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// +// assert(document) // Non-NULL document object is expected. 
+// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. 
+// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/decode.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 00000000..e85eb2e3 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,685 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. + +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + + if len(b) == 0 { + b = []byte{'\n'} + } + + yaml_parser_set_input_string(&p.parser, b) + + p.skip() + if p.event.typ != yaml_STREAM_START_EVENT { + panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return &p +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +func (p *parser) skip() { + if p.event.typ != yaml_NO_EVENT { + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + yaml_event_delete(&p.event) + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + switch p.event.typ { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + default: + panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.skip() + n.children = append(n.children, p.parse()) + if p.event.typ != yaml_DOCUMENT_END_EVENT { + panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + p.skip() + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.skip() + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.skip() + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.skip() + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[string]bool + mapType reflect.Type + terrors []string + strict bool +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[string]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + an, ok := d.doc.anchors[n.value] + if !ok { + failf("unknown anchor '%s' referenced", n.value) + } + if d.aliases[n.value] { + failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n.value] = true + good = d.unmarshal(an, out) + delete(d.aliases, n.value) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if s, ok := resolved.(string); ok && out.CanAddr() { + if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { + err := u.UnmarshalText([]byte(s)) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + good = true + } else if resolved != nil { + out.SetString(n.value) + good = true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else { + out.Set(reflect.ValueOf(resolved)) + } + good = true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + good = true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + 
out.SetInt(int64(resolved)) + good = true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + good = true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + good = true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + good = true + case int64: + out.SetFloat(float64(resolved)) + good = true + case uint64: + out.SetFloat(float64(resolved)) + good = true + case float64: + out.SetFloat(resolved) + good = true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + good = true + } + } + if !good { + d.terror(n, tag, out) + } + return good +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + out.Set(out.Slice(0, j)) + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in 
struct %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 00000000..dcaf502f --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1684 @@ +package yaml + +import ( + "bytes" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. 
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. 
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
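For contrast with the flow states, the block-sequence and block-mapping states above produce the default indented output. A small hedged sketch through the public API (names and the exact indentation are illustrative):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// With no flow hints, the emitter walks the block states:
	// a block mapping for the map, a block sequence for the slice.
	doc := map[string][]string{"steps": {"build", "test", "deploy"}}
	out, err := yaml.Marshal(doc)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected, roughly:
	//   steps:
	//   - build
	//   - test
	//   - deploy
}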
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
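yaml_emitter_emit_node dispatches purely on the event type, so the whole emitter is driven by feeding it a well-formed event sequence. A hedged sketch of that sequence for a single plain scalar, using only identifiers that appear in this file and in encode.go below; these are unexported, so this helper would only compile inside the package and is a sketch, not part of the library:

// emitHello writes a one-document stream containing the plain scalar "hello"
// into *out by feeding events through yaml_emitter_emit, the same path the
// state machine above dispatches on.
func emitHello(out *[]byte) bool {
	var emitter yaml_emitter_t
	var event yaml_event_t

	if !yaml_emitter_initialize(&emitter) {
		return false
	}
	defer yaml_emitter_delete(&emitter)
	yaml_emitter_set_output_string(&emitter, out)

	// STREAM-START, DOCUMENT-START, SCALAR, DOCUMENT-END, STREAM-END:
	// the order the emitter states expect.
	return yaml_stream_start_event_initialize(&event, yaml_UTF8_ENCODING) &&
		yaml_emitter_emit(&emitter, &event) &&
		yaml_document_start_event_initialize(&event, nil, nil, true) &&
		yaml_emitter_emit(&emitter, &event) &&
		yaml_scalar_event_initialize(&event, nil, nil, []byte("hello"), true, true, yaml_PLAIN_SCALAR_STYLE) &&
		yaml_emitter_emit(&emitter, &event) &&
		yaml_document_end_event_initialize(&event, true) &&
		yaml_emitter_emit(&emitter, &event) &&
		yaml_stream_end_event_initialize(&event) &&
		yaml_emitter_emit(&emitter, &event)
	// On success *out holds the encoded document, roughly "hello\n".
}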
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an achor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. 
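How yaml_emitter_select_scalar_style resolves in practice is easiest to observe through the public Marshal. A hedged sketch (key names are illustrative and the exact quoting and ordering are approximate):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Plain scalars stay plain, strings that would re-parse as another
	// type get quoted, and multiline strings are requested as literals.
	doc := map[string]string{
		"plain":     "hello",
		"ambiguous": "true",          // would parse as a bool if left plain
		"multiline": "line1\nline2",  // requested as a literal block scalar
	}
	out, err := yaml.Marshal(doc)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected, roughly (keys come out sorted):
	//   ambiguous: "true"
	//   multiline: |-
	//     line1
	//     line2
	//   plain: hello
}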
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. 
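The %TAG handle checks above are simple enough to restate standalone. A hedged, ASCII-only sketch (the real code iterates UTF-8 runes via width/is_alpha, which also accept '_' and '-'):

package main

import (
	"errors"
	"fmt"
)

// validTagHandle mirrors the handle rules enforced by
// yaml_emitter_analyze_tag_directive: non-empty, starts and ends with '!',
// and only alphanumeric characters (plus '_' and '-') in between.
func validTagHandle(handle string) error {
	if handle == "" {
		return errors.New("tag handle must not be empty")
	}
	if handle[0] != '!' {
		return errors.New("tag handle must start with '!'")
	}
	if handle[len(handle)-1] != '!' {
		return errors.New("tag handle must end with '!'")
	}
	for i := 1; i < len(handle)-1; i++ {
		c := handle[i]
		ok := c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c >= 'a' && c <= 'z' || c == '_' || c == '-'
		if !ok {
			return fmt.Errorf("tag handle %q must contain alphanumerical characters only", handle)
		}
	}
	return nil
}

func main() {
	fmt.Println(validTagHandle("!my!"))  // <nil>
	fmt.Println(validTagHandle("my!"))   // must start with '!'
	fmt.Println(validTagHandle("!a b!")) // alphanumerical characters only
}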
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
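The allowed-style flags computed above decide, together with yaml_emitter_select_scalar_style, how awkward strings come out of the public API. A hedged sketch (the quoting shown is approximate):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	samples := []string{
		" leading space",      // leading blank: plain style disallowed
		"trailing space ",     // trailing blank: plain and block styles disallowed
		"- looks like a list", // '-' followed by space at the start: block indicator
		"#not a comment",      // '#' at the start: flow and block indicator
	}
	for _, s := range samples {
		out, err := yaml.Marshal(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-22q -> %s", s, out)
	}
	// Each value is expected to come out quoted, e.g. '- looks like a list'.
}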
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } 
else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, 
rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/encode.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 00000000..84f84995 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,306 @@ +package yaml + +import ( + "encoding" + "fmt" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool +} + +func newEncoder() (e *encoder) { + e = &encoder{} + e.must(yaml_emitter_initialize(&e.emitter)) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) + e.emit() + e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) + e.emit() + return e +} + +func (e *encoder) finish() { + e.must(yaml_document_end_event_initialize(&e.event, true)) + e.emit() + e.emitter.open_ended = false + e.must(yaml_stream_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { + e.must(false) + } +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() { + e.nilv() + return + } + iface := in.Interface() + if m, ok := iface.(Marshaler); ok { + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + } else if m, ok := iface.(encoding.TextMarshaler); ok { + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + } + switch in.Kind() { + case reflect.Interface: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + f() + e.must(yaml_mapping_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) slicev(tag 
string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + rtag, rs := resolve("", s) + if rtag == yaml_BINARY_TAG { + if tag == "" || tag == yaml_STR_TAG { + tag = rtag + s = rs.(string) + } else if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } else { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + } + if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else if strings.Contains(s, "\n") { + style = yaml_LITERAL_SCALAR_STYLE + } else { + style = yaml_PLAIN_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // FIXME: Handle 64 bits here. 
+ s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 00000000..81d05dfe --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. 
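Errors recorded through yaml_parser_set_parser_error and its _context variant below are what eventually surface from the public Unmarshal, carrying the problem text and the problem mark's line. A hedged sketch (the exact wording and line numbering of the message belong to the parser and are not guaranteed here):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var v interface{}
	// An unterminated flow sequence forces the parser into an error path.
	err := yaml.Unmarshal([]byte("items: [1, 2"), &v)
	fmt.Println(err)
	// Prints something like:
	//   yaml: line 1: did not find expected ',' or ']'
}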
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. +func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected <document start>", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
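+//
+// An empty scalar stands in for a node that the grammar expects but the
+// input omits, e.g. the missing values in
+//
+//      a:            # block mapping value absent
+//      {x: , y: }    # flow mapping values absent
+//
+// (so, for instance, unmarshalling "a:" into a map yields a nil value for
+// key "a"). The event produced below carries a nil value, implicit set,
+// plain style, and start/end marks both pointing at the position where the
+// node would have begun.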
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 00000000..f4507917 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,394 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. 
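+//
+// (In this Go port the helper returns false rather than 0; callers abort
+// the read by returning its result directly.)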
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. 
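+	// Only the bytes from parser.buffer_pos onwards are still unread by
+	// the scanner, so they are copied to the front of parser.buffer and
+	// parser.buffer_pos is reset to zero before more input is decoded.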
+ buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. 
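+				// Surrogate code points (U+D800-U+DFFF) and values above
+				// U+10FFFF are not valid Unicode scalar values, so a
+				// well-formed UTF-8 stream must never encode them.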
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 
110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 00000000..232313cc --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,208 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "unicode/utf8" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" 
+ tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt(plain[3:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, -int(intv) + } else { + return yaml_INT_TAG, -intv + } + } + } + // XXX Handle timestamps here. + + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + if tag == yaml_BINARY_TAG { + return yaml_BINARY_TAG, in + } + if utf8.ValidString(in) { + return yaml_STR_TAG, in + } + return yaml_BINARY_TAG, encodeBase64(in) +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. 
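+//
+// For example, a 100-byte input encodes to 136 base64 characters and is
+// written as a 70-character line plus a 66-character line, each followed
+// by '\n'; inputs that encode to fewer than 70 characters come back as a
+// single line with no trailing newline.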
+func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/scannerc.go new file mode 100644 index 00000000..07448445 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/scannerc.go @@ -0,0 +1,2711 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! 
tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. 
A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. 
Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. 
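+//
+// Every recognized break (CR LF, lone CR or LF, and the Unicode NEL
+// character) is normalized to a single '\n'; the LS and PS separators are
+// copied through unchanged.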
+func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. 
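+	// (This is the inlined form of cache(parser, 1): make sure at least one
+	// decoded character is available before looking at it.)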
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? 
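+	// Block scalars are only recognized outside flow context, for example:
+	//
+	//      message: |
+	//        literal text, line breaks preserved
+	//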
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. 
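+			// A required key is one that sits at the block indentation
+			// column; if such a key goes stale before its ':' is seen, the
+			// document cannot be valid, so report an error instead of
+			// silently dropping it.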
+ if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // A simple key is required only when it is the first token in the current + // line. Therefore it is always allowed. But we add a check anyway. + if required && !parser.simple_key_allowed { + panic("should not happen") + } + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + return true +} + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // Increase the flow level. + parser.flow_level++ + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + } + return true +} + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. 
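+	// Indentation is only meaningful in block context; inside flow
+	// collections the structure is delimited by explicit indicators, so the
+	// indent stack is left untouched.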
+ if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. 
+ if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. 
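// Illustrative sketch (not from the vendored sources): the flow ('[', '{',
// ',') and block ('-') entry tokens fetched above correspond to the two
// equivalent collection styles below. Assumes `import "gopkg.in/yaml.v2"`.
func exampleCollections() {
	var flow, block struct{ Seq []int }
	_ = yaml.Unmarshal([]byte("seq: [1, 2, 3]\n"), &flow)             // flow style
	_ = yaml.Unmarshal([]byte("seq:\n  - 1\n  - 2\n  - 3\n"), &block) // block style
	// Both are expected to decode to Seq == []int{1, 2, 3}.
}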
+ if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. 
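// Illustrative sketch (not from the vendored sources): the ANCHOR and ALIAS
// tokens produced above back YAML's &anchor / *alias value reuse. Assumes
// `import "gopkg.in/yaml.v2"`.
func exampleAnchors() {
	var v struct{ A, B int }
	_ = yaml.Unmarshal([]byte("a: &n 1\nb: *n\n"), &v)
	// v.A == 1 and v.B == 1: the alias *n refers back to the anchored value.
}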
+ if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. 
+ } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. 
+ if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. 
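// Illustrative sketch (not from the vendored sources): a standalone version of
// the digit accumulation done by yaml_parser_scan_version_directive_number
// above, with the same two-digit limit. Hypothetical helper; assumes
// `import "errors"`.
func scanVersionNumber(s string) (int8, error) {
	const maxLen = 2 // mirrors max_number_length
	var value, length int8
	for i := 0; i < len(s) && s[i] >= '0' && s[i] <= '9'; i++ {
		length++
		if length > maxLen {
			return 0, errors.New("found extremely long version number")
		}
		value = value*10 + int8(s[i]-'0')
	}
	if length == 0 {
		return 0, errors.New("did not find expected version number")
	}
	return value, nil
}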
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. 
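// Illustrative sketch (not from the vendored sources): how one of the tag
// forms handled above looks to users of the package. The shorthand !!str is
// the '!handle!suffix' form with the default secondary handle. Assumes
// `import "gopkg.in/yaml.v2"`.
func exampleTags() {
	var v struct{ V string }
	// The explicit !!str tag forces the otherwise numeric-looking plain
	// scalar 123 to decode as a string.
	_ = yaml.Unmarshal([]byte("v: !!str 123\n"), &v) // v.V == "123"
}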
+ if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' 
|| parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. 
+ increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. 
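// Illustrative sketch (not from the vendored sources): how the literal/folded
// indicators and chomping scanned above surface through the public API.
// Assumes `import "gopkg.in/yaml.v2"`.
func exampleBlockScalars() {
	var v struct{ S string }
	_ = yaml.Unmarshal([]byte("s: |\n  hello\n  world\n"), &v)  // v.S == "hello\nworld\n" (literal, clip)
	_ = yaml.Unmarshal([]byte("s: >\n  hello\n  world\n"), &v)  // v.S == "hello world\n"  (folded)
	_ = yaml.Unmarshal([]byte("s: |-\n  hello\n  world\n"), &v) // v.S == "hello\nworld"   (strip chomping)
}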
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. 
+ if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. 
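// Illustrative sketch (not from the vendored sources): a few of the
// double-quoted escape sequences handled by the switch above. Assumes
// `import "gopkg.in/yaml.v2"`.
func exampleEscapes() {
	var v struct{ S string }
	_ = yaml.Unmarshal([]byte(`s: "tab:\t caf\u00e9\n"`), &v)
	// v.S == "tab:\t caf\u00e9\n" with the escapes resolved: \t and \n become
	// control characters and the 4-digit \u escape is written back as UTF-8.
}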
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". + if parser.flow_level > 0 && + parser.buffer[parser.buffer_pos] == ':' && + !is_blankz(parser.buffer, parser.buffer_pos+1) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found unexpected ':'") + return false + } + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab character that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violate indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
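// Illustrative sketch (not from the vendored sources): the line folding
// performed above means a plain scalar may continue onto a more indented
// line, with the single line break folded into a space. Assumes
// `import "gopkg.in/yaml.v2"`.
func examplePlainFolding() {
	var v struct{ S string }
	_ = yaml.Unmarshal([]byte("s: hello\n  world\n"), &v) // v.S == "hello world"
}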
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 00000000..5958822f --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,104 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 00000000..190362f2 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,89 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. 
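// Illustrative sketch (not from the vendored sources): the keyList ordering in
// sorter.go above gives map keys a "natural" sort when encoding, so digit runs
// compare by value rather than lexically. Assumes `import "gopkg.in/yaml.v2"`.
func exampleKeyOrder() {
	out, _ := yaml.Marshal(map[string]int{"a10": 10, "a2": 2, "b": 0})
	_ = out // expected: "a2: 2\na10: 10\nb: 0\n" -- "a2" sorts before "a10"
}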
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + // If the output encoding is UTF-8, we don't need to recode the buffer. + if emitter.encoding == yaml_UTF8_ENCODING { + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true + } + + // Recode the buffer into the raw buffer. + var low, high int + if emitter.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + pos := 0 + for pos < emitter.buffer_pos { + // See the "reader.c" code for more details on UTF-8 encoding. Note + // that we assume that the buffer contains a valid UTF-8 sequence. + + // Read the next UTF-8 character. + octet := emitter.buffer[pos] + + var w int + var value rune + switch { + case octet&0x80 == 0x00: + w, value = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, value = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, value = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, value = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = emitter.buffer[pos+k] + value = (value << 6) + (rune(octet) & 0x3F) + } + pos += w + + // Write the character. + if value < 0x10000 { + var b [2]byte + b[high] = byte(value >> 8) + b[low] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) + } else { + // Write the character using a surrogate pair (check "reader.c"). + var b [4]byte + value -= 0x10000 + b[high] = byte(0xD8 + (value >> 18)) + b[low] = byte((value >> 10) & 0xFF) + b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) + b[low+2] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) + } + } + + // Write the raw buffer. + if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + emitter.raw_buffer = emitter.raw_buffer[:0] + return true +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yaml.go new file mode 100644 index 00000000..5e3c2dae --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yaml.go @@ -0,0 +1,357 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. 
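// Illustrative sketch (not from the vendored sources): a typical
// implementation of the Unmarshaler interface described above, decoding a
// duration written as a string. Hypothetical type; assumes `import "time"`.
type duration struct{ time.Duration }

func (d *duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var s string
	if err := unmarshal(&s); err != nil {
		return err
	}
	v, err := time.ParseDuration(s)
	if err != nil {
		return err
	}
	d.Duration = v
	return nil
}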
+type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// UnmarshalStrict is like Unmarshal except that any fields that are found +// in the data that do not have corresponding struct members will result in +// an error. +func UnmarshalStrict(in []byte, out interface{}) (err error) { + return unmarshal(in, out, true) +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder(strict) + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only unmarshalled if they are exported (have an upper case +// first letter), and are unmarshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) 
yaml:"[<key>][,<flag1>[,<flag2>]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Does not apply to zero valued structs. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshal("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + + // Inline holds the field index if the field is part of an inlined struct. 
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
+ } + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yamlh.go new file mode 100644 index 00000000..3caeca04 --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yamlh.go @@ -0,0 +1,716 @@ +package yaml + +import ( + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. 
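Editor's note: getStructInfo above memoizes per-type field metadata behind fieldMapMutex: a shared read lock covers the common cache-hit path, and the write lock is taken only after a miss has been computed. A generic sketch of that memoization pattern (hypothetical cache and value type, not the package's own code):

package main

import (
	"fmt"
	"reflect"
	"sync"
)

var (
	cacheMu sync.RWMutex
	cache   = map[reflect.Type]string{} // hypothetical stand-in for structMap
)

func describe(t reflect.Type) string {
	cacheMu.RLock()
	s, ok := cache[t]
	cacheMu.RUnlock()
	if ok {
		return s // fast path: read lock only
	}

	// Compute outside any lock; concurrent callers may duplicate this work,
	// which is harmless because the result is deterministic.
	s = t.Kind().String() + ": " + t.String()

	cacheMu.Lock()
	cache[t] = s
	cacheMu.Unlock()
	return s
}

func main() {
	fmt.Println(describe(reflect.TypeOf(struct{ A int }{})))
}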
+ line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. 
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "<unknown token>" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). 
+ anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. 
+type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. + yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. 
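Editor's note: the read-handler comment above still describes the C calling convention (return 1, set size_read), while the Go type yaml_read_handler_t instead returns a byte count and an error, which is the io.Reader contract. A hypothetical in-package handler wired to the input_file field declared in the yaml_parser_t structure that follows (sketch only, not the package's own reader code):

// readerReadHandler feeds the parser from the io.Reader stored in
// parser.input_file. io.Reader already reports (n, err) with io.EOF at
// end of input, matching the Go form of the handler contract.
// (Hypothetical helper name; the handler type and field are in this file.)
func readerReadHandler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
	return parser.input_file.Read(buffer)
}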
+ yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "<unknown parser state>" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_file io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. 
+ + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. 
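Editor's note: yaml_write_handler_t above is the mirror image of the read handler: the emitter hands it a flushed chunk and expects nil on success. Two hypothetical handlers matching that signature, one appending to a caller-owned slice and one forwarding to an io.Writer (sketch only; the emitter's actual output wiring lives elsewhere in the package):

// bufferWriteHandler appends every flushed chunk to *out. (Hypothetical helper.)
func bufferWriteHandler(out *[]byte) yaml_write_handler_t {
	return func(emitter *yaml_emitter_t, buffer []byte) error {
		*out = append(*out, buffer...)
		return nil
	}
}

// writerWriteHandler forwards every flushed chunk to w; a short or failed
// write surfaces as the returned error. (Hypothetical helper.)
func writerWriteHandler(w io.Writer) yaml_write_handler_t {
	return func(emitter *yaml_emitter_t, buffer []byte) error {
		_, err := w.Write(buffer)
		return err
	}
}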
Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_file io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yamlprivateh.go new file mode 100644 index 00000000..8110ce3c --- /dev/null +++ b/vendor/github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. 
+ input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. 
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/github.com/mattermost/platform/einterfaces/account_migration.go b/vendor/github.com/mattermost/platform/einterfaces/account_migration.go deleted file mode 100644 index c9534ab6..00000000 --- a/vendor/github.com/mattermost/platform/einterfaces/account_migration.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package einterfaces - -import "github.com/mattermost/platform/model" - -type AccountMigrationInterface interface { - MigrateToLdap(fromAuthService string, forignUserFieldNameToMatch string, force bool) *model.AppError -} - -var theAccountMigrationInterface AccountMigrationInterface - -func RegisterAccountMigrationInterface(newInterface AccountMigrationInterface) { - theAccountMigrationInterface = newInterface -} - -func GetAccountMigrationInterface() AccountMigrationInterface { - return theAccountMigrationInterface -} diff --git a/vendor/github.com/mattermost/platform/einterfaces/compliance.go b/vendor/github.com/mattermost/platform/einterfaces/compliance.go deleted file mode 100644 index b7c087e8..00000000 --- a/vendor/github.com/mattermost/platform/einterfaces/compliance.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. 
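Editor's note: the byte-class helpers in this file operate on raw UTF-8 bytes at an index rather than on decoded runes. A hypothetical in-package sanity check exercising a few of them, including the multi-byte line-break forms and the width table (sketch only, not part of the vendored sources):

package yaml

import "testing"

func TestByteClassHelpers(t *testing.T) {
	// "a", TAB, CR, LF, U+2028 LINE SEPARATOR (E2 80 A8), NUL.
	buf := []byte("a\t\r\n\xe2\x80\xa8\x00")

	if !is_alpha(buf, 0) || !is_tab(buf, 1) || !is_crlf(buf, 2) {
		t.Fatal("ASCII classes misdetected")
	}
	if !is_break(buf, 4) || !is_breakz(buf, 4) {
		t.Fatal("U+2028 not recognised as a break")
	}
	if !is_z(buf, 7) || !is_breakz(buf, 7) {
		t.Fatal("NUL not recognised")
	}
	if width(buf[4]) != 3 || width(buf[0]) != 1 {
		t.Fatal("unexpected UTF-8 width")
	}
}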
- -package einterfaces - -import ( - "github.com/mattermost/platform/model" -) - -type ComplianceInterface interface { - StartComplianceDailyJob() - RunComplianceJob(job *model.Compliance) *model.AppError -} - -var theComplianceInterface ComplianceInterface - -func RegisterComplianceInterface(newInterface ComplianceInterface) { - theComplianceInterface = newInterface -} - -func GetComplianceInterface() ComplianceInterface { - return theComplianceInterface -} diff --git a/vendor/github.com/mattermost/platform/einterfaces/emoji.go b/vendor/github.com/mattermost/platform/einterfaces/emoji.go deleted file mode 100644 index 45d1432d..00000000 --- a/vendor/github.com/mattermost/platform/einterfaces/emoji.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package einterfaces - -import ( - "github.com/mattermost/platform/model" -) - -type EmojiInterface interface { - CanUserCreateEmoji(string, []*model.TeamMember) bool -} - -var theEmojiInterface EmojiInterface - -func RegisterEmojiInterface(newInterface EmojiInterface) { - theEmojiInterface = newInterface -} - -func GetEmojiInterface() EmojiInterface { - return theEmojiInterface -} diff --git a/vendor/github.com/mattermost/platform/einterfaces/jobs/data_retention.go b/vendor/github.com/mattermost/platform/einterfaces/jobs/data_retention.go deleted file mode 100644 index 442f667f..00000000 --- a/vendor/github.com/mattermost/platform/einterfaces/jobs/data_retention.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package jobs - -import ( - "github.com/mattermost/platform/model" -) - -type DataRetentionInterface interface { - MakeWorker() model.Worker - MakeScheduler() model.Scheduler -} - -var theDataRetentionInterface DataRetentionInterface - -func RegisterDataRetentionInterface(newInterface DataRetentionInterface) { - theDataRetentionInterface = newInterface -} - -func GetDataRetentionInterface() DataRetentionInterface { - return theDataRetentionInterface -} diff --git a/vendor/github.com/mattermost/platform/einterfaces/jobs/elasticsearch.go b/vendor/github.com/mattermost/platform/einterfaces/jobs/elasticsearch.go deleted file mode 100644 index 6d6dbe89..00000000 --- a/vendor/github.com/mattermost/platform/einterfaces/jobs/elasticsearch.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. 
- -package jobs - -import ( - "github.com/mattermost/platform/model" -) - -type ElasticsearchIndexerInterface interface { - MakeWorker() model.Worker -} - -var theElasticsearchIndexerInterface ElasticsearchIndexerInterface - -func RegisterElasticsearchIndexerInterface(newInterface ElasticsearchIndexerInterface) { - theElasticsearchIndexerInterface = newInterface -} - -func GetElasticsearchIndexerInterface() ElasticsearchIndexerInterface { - return theElasticsearchIndexerInterface -} diff --git a/vendor/github.com/mattermost/platform/model/access.go b/vendor/github.com/mattermost/platform/model/access.go index 9e16ed58..551ef930 100644 --- a/vendor/github.com/mattermost/platform/model/access.go +++ b/vendor/github.com/mattermost/platform/model/access.go @@ -6,6 +6,7 @@ package model import ( "encoding/json" "io" + "net/http" ) const ( @@ -37,23 +38,23 @@ type AccessResponse struct { func (ad *AccessData) IsValid() *AppError { if len(ad.ClientId) == 0 || len(ad.ClientId) > 26 { - return NewLocAppError("AccessData.IsValid", "model.access.is_valid.client_id.app_error", nil, "") + return NewAppError("AccessData.IsValid", "model.access.is_valid.client_id.app_error", nil, "", http.StatusBadRequest) } if len(ad.UserId) == 0 || len(ad.UserId) > 26 { - return NewLocAppError("AccessData.IsValid", "model.access.is_valid.user_id.app_error", nil, "") + return NewAppError("AccessData.IsValid", "model.access.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) } if len(ad.Token) != 26 { - return NewLocAppError("AccessData.IsValid", "model.access.is_valid.access_token.app_error", nil, "") + return NewAppError("AccessData.IsValid", "model.access.is_valid.access_token.app_error", nil, "", http.StatusBadRequest) } if len(ad.RefreshToken) > 26 { - return NewLocAppError("AccessData.IsValid", "model.access.is_valid.refresh_token.app_error", nil, "") + return NewAppError("AccessData.IsValid", "model.access.is_valid.refresh_token.app_error", nil, "", http.StatusBadRequest) } if len(ad.RedirectUri) == 0 || len(ad.RedirectUri) > 256 || !IsValidHttpUrl(ad.RedirectUri) { - return NewLocAppError("AccessData.IsValid", "model.access.is_valid.redirect_uri.app_error", nil, "") + return NewAppError("AccessData.IsValid", "model.access.is_valid.redirect_uri.app_error", nil, "", http.StatusBadRequest) } return nil diff --git a/vendor/github.com/mattermost/platform/model/authorization.go b/vendor/github.com/mattermost/platform/model/authorization.go index d413e294..9f4e36ea 100644 --- a/vendor/github.com/mattermost/platform/model/authorization.go +++ b/vendor/github.com/mattermost/platform/model/authorization.go @@ -69,24 +69,24 @@ var PERMISSION_REVOKE_USER_ACCESS_TOKEN *Permission // admin functions but not others var PERMISSION_MANAGE_SYSTEM *Permission -var ROLE_SYSTEM_USER *Role -var ROLE_SYSTEM_ADMIN *Role -var ROLE_SYSTEM_POST_ALL *Role -var ROLE_SYSTEM_POST_ALL_PUBLIC *Role -var ROLE_SYSTEM_USER_ACCESS_TOKEN *Role +const ( + SYSTEM_USER_ROLE_ID = "system_user" + SYSTEM_ADMIN_ROLE_ID = "system_admin" + SYSTEM_POST_ALL_ROLE_ID = "system_post_all" + SYSTEM_POST_ALL_PUBLIC_ROLE_ID = "system_post_all_public" + SYSTEM_USER_ACCESS_TOKEN_ROLE_ID = "system_user_access_token" -var ROLE_TEAM_USER *Role -var ROLE_TEAM_ADMIN *Role -var ROLE_TEAM_POST_ALL *Role -var ROLE_TEAM_POST_ALL_PUBLIC *Role + TEAM_USER_ROLE_ID = "team_user" + TEAM_ADMIN_ROLE_ID = "team_admin" + TEAM_POST_ALL_ROLE_ID = "team_post_all" + TEAM_POST_ALL_PUBLIC_ROLE_ID = "team_post_all_public" -var ROLE_CHANNEL_USER *Role -var ROLE_CHANNEL_ADMIN *Role -var 
ROLE_CHANNEL_GUEST *Role + CHANNEL_USER_ROLE_ID = "channel_user" + CHANNEL_ADMIN_ROLE_ID = "channel_admin" + CHANNEL_GUEST_ROLE_ID = "guest" +) -var BuiltInRoles map[string]*Role - -func InitalizePermissions() { +func initializePermissions() { PERMISSION_INVITE_USER = &Permission{ "invite_user", "authentication.permissions.team_invite_user.name", @@ -329,11 +329,12 @@ func InitalizePermissions() { } } -func InitalizeRoles() { - InitalizePermissions() - BuiltInRoles = make(map[string]*Role) +var DefaultRoles map[string]*Role + +func initializeDefaultRoles() { + DefaultRoles = make(map[string]*Role) - ROLE_CHANNEL_USER = &Role{ + DefaultRoles[CHANNEL_USER_ROLE_ID] = &Role{ "channel_user", "authentication.roles.channel_user.name", "authentication.roles.channel_user.description", @@ -347,8 +348,8 @@ func InitalizeRoles() { PERMISSION_USE_SLASH_COMMANDS.Id, }, } - BuiltInRoles[ROLE_CHANNEL_USER.Id] = ROLE_CHANNEL_USER - ROLE_CHANNEL_ADMIN = &Role{ + + DefaultRoles[CHANNEL_ADMIN_ROLE_ID] = &Role{ "channel_admin", "authentication.roles.channel_admin.name", "authentication.roles.channel_admin.description", @@ -356,16 +357,15 @@ func InitalizeRoles() { PERMISSION_MANAGE_CHANNEL_ROLES.Id, }, } - BuiltInRoles[ROLE_CHANNEL_ADMIN.Id] = ROLE_CHANNEL_ADMIN - ROLE_CHANNEL_GUEST = &Role{ + + DefaultRoles[CHANNEL_GUEST_ROLE_ID] = &Role{ "guest", "authentication.roles.global_guest.name", "authentication.roles.global_guest.description", []string{}, } - BuiltInRoles[ROLE_CHANNEL_GUEST.Id] = ROLE_CHANNEL_GUEST - ROLE_TEAM_USER = &Role{ + DefaultRoles[TEAM_USER_ROLE_ID] = &Role{ "team_user", "authentication.roles.team_user.name", "authentication.roles.team_user.description", @@ -376,9 +376,8 @@ func InitalizeRoles() { PERMISSION_VIEW_TEAM.Id, }, } - BuiltInRoles[ROLE_TEAM_USER.Id] = ROLE_TEAM_USER - ROLE_TEAM_POST_ALL = &Role{ + DefaultRoles[TEAM_POST_ALL_ROLE_ID] = &Role{ "team_post_all", "authentication.roles.team_post_all.name", "authentication.roles.team_post_all.description", @@ -386,9 +385,8 @@ func InitalizeRoles() { PERMISSION_CREATE_POST.Id, }, } - BuiltInRoles[ROLE_TEAM_POST_ALL.Id] = ROLE_TEAM_POST_ALL - ROLE_TEAM_POST_ALL_PUBLIC = &Role{ + DefaultRoles[TEAM_POST_ALL_PUBLIC_ROLE_ID] = &Role{ "team_post_all_public", "authentication.roles.team_post_all_public.name", "authentication.roles.team_post_all_public.description", @@ -396,9 +394,8 @@ func InitalizeRoles() { PERMISSION_CREATE_POST_PUBLIC.Id, }, } - BuiltInRoles[ROLE_TEAM_POST_ALL_PUBLIC.Id] = ROLE_TEAM_POST_ALL_PUBLIC - ROLE_TEAM_ADMIN = &Role{ + DefaultRoles[TEAM_ADMIN_ROLE_ID] = &Role{ "team_admin", "authentication.roles.team_admin.name", "authentication.roles.team_admin.description", @@ -415,9 +412,8 @@ func InitalizeRoles() { PERMISSION_MANAGE_WEBHOOKS.Id, }, } - BuiltInRoles[ROLE_TEAM_ADMIN.Id] = ROLE_TEAM_ADMIN - ROLE_SYSTEM_USER = &Role{ + DefaultRoles[SYSTEM_USER_ROLE_ID] = &Role{ "system_user", "authentication.roles.global_user.name", "authentication.roles.global_user.description", @@ -427,9 +423,8 @@ func InitalizeRoles() { PERMISSION_PERMANENT_DELETE_USER.Id, }, } - BuiltInRoles[ROLE_SYSTEM_USER.Id] = ROLE_SYSTEM_USER - ROLE_SYSTEM_POST_ALL = &Role{ + DefaultRoles[SYSTEM_POST_ALL_ROLE_ID] = &Role{ "system_post_all", "authentication.roles.system_post_all.name", "authentication.roles.system_post_all.description", @@ -437,9 +432,8 @@ func InitalizeRoles() { PERMISSION_CREATE_POST.Id, }, } - BuiltInRoles[ROLE_SYSTEM_POST_ALL.Id] = ROLE_SYSTEM_POST_ALL - ROLE_SYSTEM_POST_ALL_PUBLIC = &Role{ + DefaultRoles[SYSTEM_POST_ALL_PUBLIC_ROLE_ID] = 
&Role{ "system_post_all_public", "authentication.roles.system_post_all_public.name", "authentication.roles.system_post_all_public.description", @@ -447,9 +441,8 @@ func InitalizeRoles() { PERMISSION_CREATE_POST_PUBLIC.Id, }, } - BuiltInRoles[ROLE_SYSTEM_POST_ALL_PUBLIC.Id] = ROLE_SYSTEM_POST_ALL_PUBLIC - ROLE_SYSTEM_USER_ACCESS_TOKEN = &Role{ + DefaultRoles[SYSTEM_USER_ACCESS_TOKEN_ROLE_ID] = &Role{ "system_user_access_token", "authentication.roles.system_user_access_token.name", "authentication.roles.system_user_access_token.description", @@ -459,9 +452,8 @@ func InitalizeRoles() { PERMISSION_REVOKE_USER_ACCESS_TOKEN.Id, }, } - BuiltInRoles[ROLE_SYSTEM_USER_ACCESS_TOKEN.Id] = ROLE_SYSTEM_USER_ACCESS_TOKEN - ROLE_SYSTEM_ADMIN = &Role{ + DefaultRoles[SYSTEM_ADMIN_ROLE_ID] = &Role{ "system_admin", "authentication.roles.global_admin.name", "authentication.roles.global_admin.description", @@ -500,17 +492,15 @@ func InitalizeRoles() { PERMISSION_READ_USER_ACCESS_TOKEN.Id, PERMISSION_REVOKE_USER_ACCESS_TOKEN.Id, }, - ROLE_TEAM_USER.Permissions..., + DefaultRoles[TEAM_USER_ROLE_ID].Permissions..., ), - ROLE_CHANNEL_USER.Permissions..., + DefaultRoles[CHANNEL_USER_ROLE_ID].Permissions..., ), - ROLE_TEAM_ADMIN.Permissions..., + DefaultRoles[TEAM_ADMIN_ROLE_ID].Permissions..., ), - ROLE_CHANNEL_ADMIN.Permissions..., + DefaultRoles[CHANNEL_ADMIN_ROLE_ID].Permissions..., ), } - BuiltInRoles[ROLE_SYSTEM_ADMIN.Id] = ROLE_SYSTEM_ADMIN - } func RoleIdsToString(roles []string) string { @@ -527,5 +517,6 @@ func RoleIdsToString(roles []string) string { } func init() { - InitalizeRoles() + initializePermissions() + initializeDefaultRoles() } diff --git a/vendor/github.com/mattermost/platform/model/authorize.go b/vendor/github.com/mattermost/platform/model/authorize.go index 460b7082..df07ff14 100644 --- a/vendor/github.com/mattermost/platform/model/authorize.go +++ b/vendor/github.com/mattermost/platform/model/authorize.go @@ -39,35 +39,35 @@ type AuthorizeRequest struct { func (ad *AuthData) IsValid() *AppError { if len(ad.ClientId) != 26 { - return NewLocAppError("AuthData.IsValid", "model.authorize.is_valid.client_id.app_error", nil, "") + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.client_id.app_error", nil, "", http.StatusBadRequest) } if len(ad.UserId) != 26 { - return NewLocAppError("AuthData.IsValid", "model.authorize.is_valid.user_id.app_error", nil, "") + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) } if len(ad.Code) == 0 || len(ad.Code) > 128 { - return NewLocAppError("AuthData.IsValid", "model.authorize.is_valid.auth_code.app_error", nil, "client_id="+ad.ClientId) + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.auth_code.app_error", nil, "client_id="+ad.ClientId, http.StatusBadRequest) } if ad.ExpiresIn == 0 { - return NewLocAppError("AuthData.IsValid", "model.authorize.is_valid.expires.app_error", nil, "") + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.expires.app_error", nil, "", http.StatusBadRequest) } if ad.CreateAt <= 0 { - return NewLocAppError("AuthData.IsValid", "model.authorize.is_valid.create_at.app_error", nil, "client_id="+ad.ClientId) + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.create_at.app_error", nil, "client_id="+ad.ClientId, http.StatusBadRequest) } if len(ad.RedirectUri) == 0 || len(ad.RedirectUri) > 256 || !IsValidHttpUrl(ad.RedirectUri) { - return NewLocAppError("AuthData.IsValid", 
"model.authorize.is_valid.redirect_uri.app_error", nil, "client_id="+ad.ClientId) + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.redirect_uri.app_error", nil, "client_id="+ad.ClientId, http.StatusBadRequest) } if len(ad.State) > 128 { - return NewLocAppError("AuthData.IsValid", "model.authorize.is_valid.state.app_error", nil, "client_id="+ad.ClientId) + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.state.app_error", nil, "client_id="+ad.ClientId, http.StatusBadRequest) } if len(ad.Scope) > 128 { - return NewLocAppError("AuthData.IsValid", "model.authorize.is_valid.scope.app_error", nil, "client_id="+ad.ClientId) + return NewAppError("AuthData.IsValid", "model.authorize.is_valid.scope.app_error", nil, "client_id="+ad.ClientId, http.StatusBadRequest) } return nil @@ -155,10 +155,5 @@ func AuthorizeRequestFromJson(data io.Reader) *AuthorizeRequest { } func (ad *AuthData) IsExpired() bool { - - if GetMillis() > ad.CreateAt+int64(ad.ExpiresIn*1000) { - return true - } - - return false + return GetMillis() > ad.CreateAt+int64(ad.ExpiresIn*1000) } diff --git a/vendor/github.com/mattermost/platform/model/builtin.go b/vendor/github.com/mattermost/platform/model/builtin.go new file mode 100644 index 00000000..5dd00a96 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/builtin.go @@ -0,0 +1,9 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +func NewBool(b bool) *bool { return &b } +func NewInt(n int) *int { return &n } +func NewInt64(n int64) *int64 { return &n } +func NewString(s string) *string { return &s } diff --git a/vendor/github.com/mattermost/platform/model/bundle_info.go b/vendor/github.com/mattermost/platform/model/bundle_info.go new file mode 100644 index 00000000..6965159c --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/bundle_info.go @@ -0,0 +1,23 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +type BundleInfo struct { + Path string + + Manifest *Manifest + ManifestPath string + ManifestError error +} + +// Returns bundle info for the given path. The return value is never nil. 
+func BundleInfoForPath(path string) *BundleInfo { + m, mpath, err := FindManifest(path) + return &BundleInfo{ + Path: path, + Manifest: m, + ManifestPath: mpath, + ManifestError: err, + } +} diff --git a/vendor/github.com/mattermost/platform/model/channel.go b/vendor/github.com/mattermost/platform/model/channel.go index 50d48755..a4c733c3 100644 --- a/vendor/github.com/mattermost/platform/model/channel.go +++ b/vendor/github.com/mattermost/platform/model/channel.go @@ -8,6 +8,7 @@ import ( "encoding/hex" "encoding/json" "io" + "net/http" "sort" "strings" "unicode/utf8" @@ -24,6 +25,7 @@ const ( CHANNEL_DISPLAY_NAME_MAX_RUNES = 64 CHANNEL_NAME_MIN_LENGTH = 2 CHANNEL_NAME_MAX_LENGTH = 64 + CHANNEL_NAME_UI_MAX_LENGTH = 22 CHANNEL_HEADER_MAX_RUNES = 1024 CHANNEL_PURPOSE_MAX_RUNES = 250 CHANNEL_CACHE_SIZE = 25000 @@ -53,6 +55,11 @@ type ChannelPatch struct { Purpose *string `json:"purpose"` } +func (o *Channel) DeepCopy() *Channel { + copy := *o + return © +} + func (o *Channel) ToJson() string { b, err := json.Marshal(o) if err != nil { @@ -104,39 +111,39 @@ func (o *Channel) StatsEtag() string { func (o *Channel) IsValid() *AppError { if len(o.Id) != 26 { - return NewLocAppError("Channel.IsValid", "model.channel.is_valid.id.app_error", nil, "") + return NewAppError("Channel.IsValid", "model.channel.is_valid.id.app_error", nil, "", http.StatusBadRequest) } if o.CreateAt == 0 { - return NewLocAppError("Channel.IsValid", "model.channel.is_valid.create_at.app_error", nil, "id="+o.Id) + return NewAppError("Channel.IsValid", "model.channel.is_valid.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) } if o.UpdateAt == 0 { - return NewLocAppError("Channel.IsValid", "model.channel.is_valid.update_at.app_error", nil, "id="+o.Id) + return NewAppError("Channel.IsValid", "model.channel.is_valid.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) } if utf8.RuneCountInString(o.DisplayName) > CHANNEL_DISPLAY_NAME_MAX_RUNES { - return NewLocAppError("Channel.IsValid", "model.channel.is_valid.display_name.app_error", nil, "id="+o.Id) + return NewAppError("Channel.IsValid", "model.channel.is_valid.display_name.app_error", nil, "id="+o.Id, http.StatusBadRequest) } if !IsValidChannelIdentifier(o.Name) { - return NewLocAppError("Channel.IsValid", "model.channel.is_valid.2_or_more.app_error", nil, "id="+o.Id) + return NewAppError("Channel.IsValid", "model.channel.is_valid.2_or_more.app_error", nil, "id="+o.Id, http.StatusBadRequest) } if !(o.Type == CHANNEL_OPEN || o.Type == CHANNEL_PRIVATE || o.Type == CHANNEL_DIRECT || o.Type == CHANNEL_GROUP) { - return NewLocAppError("Channel.IsValid", "model.channel.is_valid.type.app_error", nil, "id="+o.Id) + return NewAppError("Channel.IsValid", "model.channel.is_valid.type.app_error", nil, "id="+o.Id, http.StatusBadRequest) } if utf8.RuneCountInString(o.Header) > CHANNEL_HEADER_MAX_RUNES { - return NewLocAppError("Channel.IsValid", "model.channel.is_valid.header.app_error", nil, "id="+o.Id) + return NewAppError("Channel.IsValid", "model.channel.is_valid.header.app_error", nil, "id="+o.Id, http.StatusBadRequest) } if utf8.RuneCountInString(o.Purpose) > CHANNEL_PURPOSE_MAX_RUNES { - return NewLocAppError("Channel.IsValid", "model.channel.is_valid.purpose.app_error", nil, "id="+o.Id) + return NewAppError("Channel.IsValid", "model.channel.is_valid.purpose.app_error", nil, "id="+o.Id, http.StatusBadRequest) } if len(o.CreatorId) > 26 { - return NewLocAppError("Channel.IsValid", "model.channel.is_valid.creator_id.app_error", nil, "") + return 
NewAppError("Channel.IsValid", "model.channel.is_valid.creator_id.app_error", nil, "", http.StatusBadRequest) } return nil diff --git a/vendor/github.com/mattermost/platform/model/channel_member.go b/vendor/github.com/mattermost/platform/model/channel_member.go index f6d58519..e7ad8232 100644 --- a/vendor/github.com/mattermost/platform/model/channel_member.go +++ b/vendor/github.com/mattermost/platform/model/channel_member.go @@ -6,6 +6,7 @@ package model import ( "encoding/json" "io" + "net/http" "strings" ) @@ -101,36 +102,32 @@ func ChannelMemberFromJson(data io.Reader) *ChannelMember { func (o *ChannelMember) IsValid() *AppError { if len(o.ChannelId) != 26 { - return NewLocAppError("ChannelMember.IsValid", "model.channel_member.is_valid.channel_id.app_error", nil, "") + return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest) } if len(o.UserId) != 26 { - return NewLocAppError("ChannelMember.IsValid", "model.channel_member.is_valid.user_id.app_error", nil, "") + return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) } notifyLevel := o.NotifyProps[DESKTOP_NOTIFY_PROP] if len(notifyLevel) > 20 || !IsChannelNotifyLevelValid(notifyLevel) { - return NewLocAppError("ChannelMember.IsValid", "model.channel_member.is_valid.notify_level.app_error", - nil, "notify_level="+notifyLevel) + return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.notify_level.app_error", nil, "notify_level="+notifyLevel, http.StatusBadRequest) } markUnreadLevel := o.NotifyProps[MARK_UNREAD_NOTIFY_PROP] if len(markUnreadLevel) > 20 || !IsChannelMarkUnreadLevelValid(markUnreadLevel) { - return NewLocAppError("ChannelMember.IsValid", "model.channel_member.is_valid.unread_level.app_error", - nil, "mark_unread_level="+markUnreadLevel) + return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.unread_level.app_error", nil, "mark_unread_level="+markUnreadLevel, http.StatusBadRequest) } if pushLevel, ok := o.NotifyProps[PUSH_NOTIFY_PROP]; ok { if len(pushLevel) > 20 || !IsChannelNotifyLevelValid(pushLevel) { - return NewLocAppError("ChannelMember.IsValid", "model.channel_member.is_valid.push_level.app_error", - nil, "push_notification_level="+pushLevel) + return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.push_level.app_error", nil, "push_notification_level="+pushLevel, http.StatusBadRequest) } } if sendEmail, ok := o.NotifyProps[EMAIL_NOTIFY_PROP]; ok { if len(sendEmail) > 20 || !IsSendEmailValid(sendEmail) { - return NewLocAppError("ChannelMember.IsValid", "model.channel_member.is_valid.email_value.app_error", - nil, "push_notification_level="+sendEmail) + return NewAppError("ChannelMember.IsValid", "model.channel_member.is_valid.email_value.app_error", nil, "push_notification_level="+sendEmail, http.StatusBadRequest) } } diff --git a/vendor/github.com/mattermost/platform/model/channel_member_history.go b/vendor/github.com/mattermost/platform/model/channel_member_history.go new file mode 100644 index 00000000..bc71b580 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/channel_member_history.go @@ -0,0 +1,12 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package model + +type ChannelMemberHistory struct { + ChannelId string + UserId string + UserEmail string `db:"Email"` + JoinTime int64 + LeaveTime *int64 +} diff --git a/vendor/github.com/mattermost/platform/model/channel_view.go b/vendor/github.com/mattermost/platform/model/channel_view.go index 8a7ead76..e7b1de30 100644 --- a/vendor/github.com/mattermost/platform/model/channel_view.go +++ b/vendor/github.com/mattermost/platform/model/channel_view.go @@ -32,3 +32,28 @@ func ChannelViewFromJson(data io.Reader) *ChannelView { return nil } } + +type ChannelViewResponse struct { + Status string `json:"status"` + LastViewedAtTimes map[string]int64 `json:"last_viewed_at_times"` +} + +func (o *ChannelViewResponse) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func ChannelViewResponseFromJson(data io.Reader) *ChannelViewResponse { + decoder := json.NewDecoder(data) + var o ChannelViewResponse + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/client.go b/vendor/github.com/mattermost/platform/model/client.go index 564aa2e4..ef890b59 100644 --- a/vendor/github.com/mattermost/platform/model/client.go +++ b/vendor/github.com/mattermost/platform/model/client.go @@ -18,6 +18,8 @@ import ( l4g "github.com/alecthomas/log4go" ) +var UsedApiV3 *int32 = new(int32) + const ( HEADER_REQUEST_ID = "X-Request-ID" HEADER_VERSION_ID = "X-Version-ID" @@ -37,7 +39,7 @@ const ( STATUS_FAIL = "FAIL" STATUS_REMOVE = "REMOVE" - CLIENT_DIR = "webapp/dist" + CLIENT_DIR = "client" API_URL_SUFFIX_V1 = "/api/v1" API_URL_SUFFIX_V3 = "/api/v3" @@ -144,7 +146,7 @@ func (c *Client) DoPost(url, data, contentType string) (*http.Response, *AppErro rq.Close = true if rp, err := c.HttpClient.Do(rq); err != nil { - return nil, NewLocAppError(url, "model.client.connecting.app_error", nil, err.Error()) + return nil, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0) } else if rp.StatusCode >= 300 { defer closeBody(rp) return nil, AppErrorFromJson(rp.Body) @@ -162,7 +164,7 @@ func (c *Client) DoApiPost(url string, data string) (*http.Response, *AppError) } if rp, err := c.HttpClient.Do(rq); err != nil { - return nil, NewLocAppError(url, "model.client.connecting.app_error", nil, err.Error()) + return nil, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0) } else if rp.StatusCode >= 300 { defer closeBody(rp) return nil, AppErrorFromJson(rp.Body) @@ -184,7 +186,7 @@ func (c *Client) DoApiGet(url string, data string, etag string) (*http.Response, } if rp, err := c.HttpClient.Do(rq); err != nil { - return nil, NewLocAppError(url, "model.client.connecting.app_error", nil, err.Error()) + return nil, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0) } else if rp.StatusCode == 304 { return rp, nil } else if rp.StatusCode >= 300 { @@ -429,7 +431,7 @@ func (c *Client) UpdateTeam(team *Team) (*Result, *AppError) { } else { defer closeBody(r) return &Result{r.Header.Get(HEADER_REQUEST_ID), - r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + r.Header.Get(HEADER_ETAG_SERVER), TeamFromJson(r.Body)}, nil } } @@ -677,7 +679,7 @@ func (c *Client) login(m map[string]string) (*Result, *AppError) { sessionToken := getCookie(SESSION_COOKIE_TOKEN, r) if c.AuthToken != sessionToken.Value { - NewLocAppError("/users/login", "model.client.login.app_error", nil, "") + NewAppError("/users/login", 
"model.client.login.app_error", nil, "", 0) } defer closeBody(r) @@ -1054,7 +1056,7 @@ func (c *Client) DownloadComplianceReport(id string) (*Result, *AppError) { } if rp, err := c.HttpClient.Do(rq); err != nil { - return nil, NewLocAppError("/admin/download_compliance_report", "model.client.connecting.app_error", nil, err.Error()) + return nil, NewAppError("/admin/download_compliance_report", "model.client.connecting.app_error", nil, err.Error(), 0) } else if rp.StatusCode >= 300 { defer rp.Body.Close() return nil, AppErrorFromJson(rp.Body) @@ -1527,19 +1529,19 @@ func (c *Client) UploadPostAttachment(data []byte, channelId string, filename st writer := multipart.NewWriter(body) if part, err := writer.CreateFormFile("files", filename); err != nil { - return nil, NewLocAppError("UploadPostAttachment", "model.client.upload_post_attachment.file.app_error", nil, err.Error()) + return nil, NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.file.app_error", nil, err.Error(), 0) } else if _, err = io.Copy(part, bytes.NewBuffer(data)); err != nil { - return nil, NewLocAppError("UploadPostAttachment", "model.client.upload_post_attachment.file.app_error", nil, err.Error()) + return nil, NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.file.app_error", nil, err.Error(), 0) } if part, err := writer.CreateFormField("channel_id"); err != nil { - return nil, NewLocAppError("UploadPostAttachment", "model.client.upload_post_attachment.channel_id.app_error", nil, err.Error()) + return nil, NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.channel_id.app_error", nil, err.Error(), 0) } else if _, err = io.Copy(part, strings.NewReader(channelId)); err != nil { - return nil, NewLocAppError("UploadPostAttachment", "model.client.upload_post_attachment.channel_id.app_error", nil, err.Error()) + return nil, NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.channel_id.app_error", nil, err.Error(), 0) } if err := writer.Close(); err != nil { - return nil, NewLocAppError("UploadPostAttachment", "model.client.upload_post_attachment.writer.app_error", nil, err.Error()) + return nil, NewAppError("UploadPostAttachment", "model.client.upload_post_attachment.writer.app_error", nil, err.Error(), 0) } if result, err := c.uploadFile(c.ApiUrl+c.GetTeamRoute()+"/files/upload", body.Bytes(), writer.FormDataContentType()); err != nil { @@ -1559,7 +1561,7 @@ func (c *Client) uploadFile(url string, data []byte, contentType string) (*Resul } if rp, err := c.HttpClient.Do(rq); err != nil { - return nil, NewLocAppError(url, "model.client.connecting.app_error", nil, err.Error()) + return nil, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0) } else if rp.StatusCode >= 300 { return nil, AppErrorFromJson(rp.Body) } else { @@ -2175,17 +2177,17 @@ func (c *Client) CreateEmoji(emoji *Emoji, image []byte, filename string) (*Emoj writer := multipart.NewWriter(body) if part, err := writer.CreateFormFile("image", filename); err != nil { - return nil, NewLocAppError("CreateEmoji", "model.client.create_emoji.image.app_error", nil, err.Error()) + return nil, NewAppError("CreateEmoji", "model.client.create_emoji.image.app_error", nil, err.Error(), 0) } else if _, err = io.Copy(part, bytes.NewBuffer(image)); err != nil { - return nil, NewLocAppError("CreateEmoji", "model.client.create_emoji.image.app_error", nil, err.Error()) + return nil, NewAppError("CreateEmoji", "model.client.create_emoji.image.app_error", nil, err.Error(), 0) } 
if err := writer.WriteField("emoji", emoji.ToJson()); err != nil { - return nil, NewLocAppError("CreateEmoji", "model.client.create_emoji.emoji.app_error", nil, err.Error()) + return nil, NewAppError("CreateEmoji", "model.client.create_emoji.emoji.app_error", nil, err.Error(), 0) } if err := writer.Close(); err != nil { - return nil, NewLocAppError("CreateEmoji", "model.client.create_emoji.writer.app_error", nil, err.Error()) + return nil, NewAppError("CreateEmoji", "model.client.create_emoji.writer.app_error", nil, err.Error(), 0) } rq, _ := http.NewRequest("POST", c.ApiUrl+c.GetEmojiRoute()+"/create", body) @@ -2197,7 +2199,7 @@ func (c *Client) CreateEmoji(emoji *Emoji, image []byte, filename string) (*Emoj } if r, err := c.HttpClient.Do(rq); err != nil { - return nil, NewLocAppError("CreateEmoji", "model.client.connecting.app_error", nil, err.Error()) + return nil, NewAppError("CreateEmoji", "model.client.connecting.app_error", nil, err.Error(), 0) } else if r.StatusCode >= 300 { return nil, AppErrorFromJson(r.Body) } else { @@ -2241,7 +2243,7 @@ func (c *Client) UploadCertificateFile(data []byte, contentType string) *AppErro } if rp, err := c.HttpClient.Do(rq); err != nil { - return NewLocAppError(url, "model.client.connecting.app_error", nil, err.Error()) + return NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0) } else if rp.StatusCode >= 300 { return AppErrorFromJson(rp.Body) } else { diff --git a/vendor/github.com/mattermost/platform/model/client4.go b/vendor/github.com/mattermost/platform/model/client4.go index 0f757853..e84a23e5 100644 --- a/vendor/github.com/mattermost/platform/model/client4.go +++ b/vendor/github.com/mattermost/platform/model/client4.go @@ -44,7 +44,7 @@ func BuildErrorResponse(r *http.Response, err *AppError) *Response { header = r.Header } else { statusCode = 0 - header = make(http.Header, 0) + header = make(http.Header) } return &Response{ @@ -178,6 +178,14 @@ func (c *Client4) GetFileRoute(fileId string) string { return fmt.Sprintf(c.GetFilesRoute()+"/%v", fileId) } +func (c *Client4) GetPluginsRoute() string { + return fmt.Sprintf("/plugins") +} + +func (c *Client4) GetPluginRoute(pluginId string) string { + return fmt.Sprintf(c.GetPluginsRoute()+"/%v", pluginId) +} + func (c *Client4) GetSystemRoute() string { return fmt.Sprintf("/system") } @@ -246,6 +254,10 @@ func (c *Client4) GetBrandRoute() string { return fmt.Sprintf("/brand") } +func (c *Client4) GetDataRetentionRoute() string { + return fmt.Sprintf("/data_retention") +} + func (c *Client4) GetElasticsearchRoute() string { return fmt.Sprintf("/elasticsearch") } @@ -319,7 +331,7 @@ func (c *Client4) DoApiRequest(method, url, data, etag string) (*http.Response, } if rp, err := c.HttpClient.Do(rq); err != nil || rp == nil { - return nil, NewLocAppError(url, "model.client.connecting.app_error", nil, err.Error()) + return nil, NewAppError(url, "model.client.connecting.app_error", nil, err.Error(), 0) } else if rp.StatusCode == 304 { return rp, nil } else if rp.StatusCode >= 300 { @@ -754,6 +766,16 @@ func (c *Client4) PatchUser(userId string, patch *UserPatch) (*User, *Response) } } +// UpdateUserAuth updates a user AuthData (uthData, authService and password) in the system. 
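The UploadPostAttachment and CreateEmoji hunks above only swap the error constructor; the multipart request body they build is unchanged. For reference, that body is a single file part plus a plain form field, closed so the terminating boundary is written. The helper below is illustrative only; the field names ("files", "channel_id") are the ones used in the diff:

package example

import (
    "bytes"
    "io"
    "mime/multipart"
    "strings"
)

// buildAttachmentBody reproduces the multipart shape used by UploadPostAttachment
// and returns the encoded body together with its Content-Type (boundary included).
func buildAttachmentBody(data []byte, channelId, filename string) ([]byte, string, error) {
    body := &bytes.Buffer{}
    writer := multipart.NewWriter(body)

    part, err := writer.CreateFormFile("files", filename)
    if err != nil {
        return nil, "", err
    }
    if _, err := io.Copy(part, bytes.NewReader(data)); err != nil {
        return nil, "", err
    }

    field, err := writer.CreateFormField("channel_id")
    if err != nil {
        return nil, "", err
    }
    if _, err := io.Copy(field, strings.NewReader(channelId)); err != nil {
        return nil, "", err
    }

    // Close must come last: it writes the terminating multipart boundary.
    if err := writer.Close(); err != nil {
        return nil, "", err
    }
    return body.Bytes(), writer.FormDataContentType(), nil
}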
+func (c *Client4) UpdateUserAuth(userId string, userAuth *UserAuth) (*UserAuth, *Response) { + if r, err := c.DoApiPut(c.GetUserRoute(userId)+"/auth", userAuth.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return UserAuthFromJson(r.Body), BuildResponse(r) + } +} + // UpdateUserMfa activates multi-factor authentication for a user if activate // is true and a valid code is provided. If activate is false, then code is not // required and multi-factor authentication is disabled for the user. @@ -889,6 +911,16 @@ func (c *Client4) RevokeSession(userId, sessionId string) (bool, *Response) { } } +// RevokeAllSessions revokes all sessions for the provided user id string. +func (c *Client4) RevokeAllSessions(userId string) (bool, *Response) { + if r, err := c.DoApiPost(c.GetUserRoute(userId)+"/sessions/revoke/all", ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + // AttachDeviceId attaches a mobile device ID to the current session. func (c *Client4) AttachDeviceId(deviceId string) (bool, *Response) { requestBody := map[string]string{"device_id": deviceId} @@ -1043,6 +1075,32 @@ func (c *Client4) RevokeUserAccessToken(tokenId string) (bool, *Response) { } } +// DisableUserAccessToken will disable a user access token by id. Must have the +// 'revoke_user_access_token' permission and if disabling for another user, must have the +// 'edit_other_users' permission. +func (c *Client4) DisableUserAccessToken(tokenId string) (bool, *Response) { + requestBody := map[string]string{"token_id": tokenId} + if r, err := c.DoApiPost(c.GetUsersRoute()+"/tokens/disable", MapToJson(requestBody)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// EnableUserAccessToken will enable a user access token by id. Must have the +// 'create_user_access_token' permission and if enabling for another user, must have the +// 'edit_other_users' permission. +func (c *Client4) EnableUserAccessToken(tokenId string) (bool, *Response) { + requestBody := map[string]string{"token_id": tokenId} + if r, err := c.DoApiPost(c.GetUsersRoute()+"/tokens/enable", MapToJson(requestBody)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + // Team Section // CreateTeam creates a team in the system based on the provided team struct. @@ -1562,13 +1620,13 @@ func (c *Client4) GetChannelMembersForUser(userId, teamId, etag string) (*Channe } // ViewChannel performs a view action for a user. Synonymous with switching channels or marking channels as read by a user. -func (c *Client4) ViewChannel(userId string, view *ChannelView) (bool, *Response) { +func (c *Client4) ViewChannel(userId string, view *ChannelView) (*ChannelViewResponse, *Response) { url := fmt.Sprintf(c.GetChannelsRoute()+"/members/%v/view", userId) if r, err := c.DoApiPost(url, view.ToJson()); err != nil { - return false, BuildErrorResponse(r, err) + return nil, BuildErrorResponse(r, err) } else { defer closeBody(r) - return CheckStatusOK(r), BuildResponse(r) + return ChannelViewResponseFromJson(r.Body), BuildResponse(r) } } @@ -1615,6 +1673,17 @@ func (c *Client4) AddChannelMember(channelId, userId string) (*ChannelMember, *R } } +// AddChannelMemberWithRootId adds user to channel and return a channel member. Post add to channel message has the postRootId. 
+func (c *Client4) AddChannelMemberWithRootId(channelId, userId, postRootId string) (*ChannelMember, *Response) { + requestBody := map[string]string{"user_id": userId, "post_root_id": postRootId} + if r, err := c.DoApiPost(c.GetChannelMembersRoute(channelId)+"", MapToJson(requestBody)); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ChannelMemberFromJson(r.Body), BuildResponse(r) + } +} + // RemoveUserFromChannel will delete the channel member object for a user, effectively removing the user from a channel. func (c *Client4) RemoveUserFromChannel(channelId, userId string) (bool, *Response) { if r, err := c.DoApiDelete(c.GetChannelMemberRoute(channelId, userId)); err != nil { @@ -1803,6 +1872,16 @@ func (c *Client4) SearchPosts(teamId string, terms string, isOrSearch bool) (*Po } } +// DoPostAction performs a post action. +func (c *Client4) DoPostAction(postId, actionId string) (bool, *Response) { + if r, err := c.DoApiPost(c.GetPostRoute(postId)+"/actions/"+actionId, ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + // File Section // UploadFile will upload a file to a channel, to be later attached to a post. @@ -2580,7 +2659,7 @@ func (c *Client4) UploadBrandImage(data []byte) (bool, *Response) { // GetLogs page of logs as a string array. func (c *Client4) GetLogs(page, perPage int) ([]string, *Response) { - query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) + query := fmt.Sprintf("?page=%v&logs_per_page=%v", page, perPage) if r, err := c.DoApiGet("/logs"+query, ""); err != nil { return nil, BuildErrorResponse(r, err) } else { @@ -2613,6 +2692,16 @@ func (c *Client4) CreateOAuthApp(app *OAuthApp) (*OAuthApp, *Response) { } } +// UpdateOAuthApp +func (c *Client4) UpdateOAuthApp(app *OAuthApp) (*OAuthApp, *Response) { + if r, err := c.DoApiPut(c.GetOAuthAppRoute(app.Id), app.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return OAuthAppFromJson(r.Body), BuildResponse(r) + } +} + // GetOAuthApps gets a page of registered OAuth 2.0 client applications with Mattermost acting as an OAuth 2.0 service provider. func (c *Client4) GetOAuthApps(page, perPage int) ([]*OAuthApp, *Response) { query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage) @@ -2711,7 +2800,7 @@ func (c *Client4) TestElasticsearch() (bool, *Response) { // PurgeElasticsearchIndexes immediately deletes all Elasticsearch indexes. func (c *Client4) PurgeElasticsearchIndexes() (bool, *Response) { - if r, err := c.DoApiPost(c.GetElasticsearchRoute()+"/test", ""); err != nil { + if r, err := c.DoApiPost(c.GetElasticsearchRoute()+"/purge_indexes", ""); err != nil { return false, BuildErrorResponse(r, err) } else { defer closeBody(r) @@ -2719,6 +2808,18 @@ func (c *Client4) PurgeElasticsearchIndexes() (bool, *Response) { } } +// Data Retention Section + +// GetDataRetentionPolicy will get the current server data retention policy details. +func (c *Client4) GetDataRetentionPolicy() (*DataRetentionPolicy, *Response) { + if r, err := c.DoApiGet(c.GetDataRetentionRoute()+"/policy", ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return DataRetentionPolicyFromJson(r.Body), BuildResponse(r) + } +} + // Commands Section // CreateCommand will create a new command if the user have the right permissions. 
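Several of the Client4 changes above alter call sites rather than just adding endpoints; ViewChannel, for example, now returns a *ChannelViewResponse with per-channel last-viewed timestamps instead of a bare bool. A hypothetical caller under the new signature (server URL, credentials and IDs are placeholders):

package main

import (
    "fmt"

    "github.com/mattermost/platform/model"
)

func main() {
    client := model.NewAPIv4Client("http://localhost:8065") // placeholder server
    if _, resp := client.Login("user@example.com", "password"); resp.Error != nil {
        fmt.Println("login failed:", resp.Error.Error())
        return
    }

    view := &model.ChannelView{ChannelId: "placeholder-channel-id"}
    viewResp, resp := client.ViewChannel("placeholder-user-id", view)
    if resp.Error != nil {
        fmt.Println("view failed:", resp.Error.Error())
        return
    }
    fmt.Println("status:", viewResp.Status)
    fmt.Println("last viewed at:", viewResp.LastViewedAtTimes)
}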
@@ -2762,9 +2863,28 @@ func (c *Client4) ListCommands(teamId string, customOnly bool) ([]*Command, *Res } } -// ExecuteCommand executes a given command. +// ExecuteCommand executes a given slash command. func (c *Client4) ExecuteCommand(channelId, command string) (*CommandResponse, *Response) { - commandArgs := &CommandArgs{ChannelId: channelId, Command: command} + commandArgs := &CommandArgs{ + ChannelId: channelId, + Command: command, + } + if r, err := c.DoApiPost(c.GetCommandsRoute()+"/execute", commandArgs.ToJson()); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CommandResponseFromJson(r.Body), BuildResponse(r) + } +} + +// ExecuteCommand executes a given slash command against the specified team +// Use this when executing slash commands in a DM/GM, since the team id cannot be inferred in that case +func (c *Client4) ExecuteCommandWithTeam(channelId, teamId, command string) (*CommandResponse, *Response) { + commandArgs := &CommandArgs{ + ChannelId: channelId, + TeamId: teamId, + Command: command, + } if r, err := c.DoApiPost(c.GetCommandsRoute()+"/execute", commandArgs.ToJson()); err != nil { return nil, BuildErrorResponse(r, err) } else { @@ -2849,17 +2969,17 @@ func (c *Client4) CreateEmoji(emoji *Emoji, image []byte, filename string) (*Emo writer := multipart.NewWriter(body) if part, err := writer.CreateFormFile("image", filename); err != nil { - return nil, &Response{StatusCode: http.StatusForbidden, Error: NewLocAppError("CreateEmoji", "model.client.create_emoji.image.app_error", nil, err.Error())} + return nil, &Response{StatusCode: http.StatusForbidden, Error: NewAppError("CreateEmoji", "model.client.create_emoji.image.app_error", nil, err.Error(), 0)} } else if _, err = io.Copy(part, bytes.NewBuffer(image)); err != nil { - return nil, &Response{StatusCode: http.StatusForbidden, Error: NewLocAppError("CreateEmoji", "model.client.create_emoji.image.app_error", nil, err.Error())} + return nil, &Response{StatusCode: http.StatusForbidden, Error: NewAppError("CreateEmoji", "model.client.create_emoji.image.app_error", nil, err.Error(), 0)} } if err := writer.WriteField("emoji", emoji.ToJson()); err != nil { - return nil, &Response{StatusCode: http.StatusForbidden, Error: NewLocAppError("CreateEmoji", "model.client.create_emoji.emoji.app_error", nil, err.Error())} + return nil, &Response{StatusCode: http.StatusForbidden, Error: NewAppError("CreateEmoji", "model.client.create_emoji.emoji.app_error", nil, err.Error(), 0)} } if err := writer.Close(); err != nil { - return nil, &Response{StatusCode: http.StatusForbidden, Error: NewLocAppError("CreateEmoji", "model.client.create_emoji.writer.app_error", nil, err.Error())} + return nil, &Response{StatusCode: http.StatusForbidden, Error: NewAppError("CreateEmoji", "model.client.create_emoji.writer.app_error", nil, err.Error(), 0)} } return c.DoEmojiUploadFile(c.GetEmojisRoute(), body.Bytes(), writer.FormDataContentType()) @@ -3009,3 +3129,97 @@ func (c *Client4) CancelJob(jobId string) (bool, *Response) { return CheckStatusOK(r), BuildResponse(r) } } + +// Plugin Section + +// UploadPlugin takes an io.Reader stream pointing to the contents of a .tar.gz plugin. +// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. 
+func (c *Client4) UploadPlugin(file io.Reader) (*Manifest, *Response) { + body := new(bytes.Buffer) + writer := multipart.NewWriter(body) + + if part, err := writer.CreateFormFile("plugin", "plugin.tar.gz"); err != nil { + return nil, &Response{Error: NewAppError("UploadPlugin", "model.client.writer.app_error", nil, err.Error(), 0)} + } else if _, err = io.Copy(part, file); err != nil { + return nil, &Response{Error: NewAppError("UploadPlugin", "model.client.writer.app_error", nil, err.Error(), 0)} + } + + if err := writer.Close(); err != nil { + return nil, &Response{Error: NewAppError("UploadPlugin", "model.client.writer.app_error", nil, err.Error(), 0)} + } + + rq, _ := http.NewRequest("POST", c.ApiUrl+c.GetPluginsRoute(), body) + rq.Header.Set("Content-Type", writer.FormDataContentType()) + rq.Close = true + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil || rp == nil { + return nil, BuildErrorResponse(rp, NewAppError("UploadPlugin", "model.client.connecting.app_error", nil, err.Error(), 0)) + } else { + defer closeBody(rp) + + if rp.StatusCode >= 300 { + return nil, BuildErrorResponse(rp, AppErrorFromJson(rp.Body)) + } else { + return ManifestFromJson(rp.Body), BuildResponse(rp) + } + } +} + +// GetPlugins will return a list of plugin manifests for currently active plugins. +// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. +func (c *Client4) GetPlugins() (*PluginsResponse, *Response) { + if r, err := c.DoApiGet(c.GetPluginsRoute(), ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return PluginsResponseFromJson(r.Body), BuildResponse(r) + } +} + +// RemovePlugin will deactivate and delete a plugin. +// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. +func (c *Client4) RemovePlugin(id string) (bool, *Response) { + if r, err := c.DoApiDelete(c.GetPluginRoute(id)); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// GetWebappPlugins will return a list of plugins that the webapp should download. +// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. +func (c *Client4) GetWebappPlugins() ([]*Manifest, *Response) { + if r, err := c.DoApiGet(c.GetPluginsRoute()+"/webapp", ""); err != nil { + return nil, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return ManifestListFromJson(r.Body), BuildResponse(r) + } +} + +// ActivatePlugin will activate an plugin installed. +// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. +func (c *Client4) ActivatePlugin(id string) (bool, *Response) { + if r, err := c.DoApiPost(c.GetPluginRoute(id)+"/activate", ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} + +// DeactivatePlugin will deactivate an active plugin. +// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. 
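The plugin endpoints added above give Client4 an end-to-end, still experimental, lifecycle: upload a .tar.gz bundle, list, activate or deactivate by id, and remove. A hypothetical round trip using those calls (file path, server URL and credentials are placeholders; DeactivatePlugin, which completes the set, follows just below):

package main

import (
    "fmt"
    "os"

    "github.com/mattermost/platform/model"
)

func main() {
    client := model.NewAPIv4Client("http://localhost:8065") // placeholder server
    if _, resp := client.Login("admin@example.com", "password"); resp.Error != nil {
        fmt.Println("login failed:", resp.Error.Error())
        return
    }

    f, err := os.Open("./myplugin.tar.gz") // placeholder bundle path
    if err != nil {
        fmt.Println("open failed:", err)
        return
    }
    defer f.Close()

    manifest, resp := client.UploadPlugin(f)
    if resp.Error != nil {
        fmt.Println("upload failed:", resp.Error.Error())
        return
    }

    if ok, resp := client.ActivatePlugin(manifest.Id); !ok || resp.Error != nil {
        fmt.Println("activate failed:", resp.Error)
        return
    }
    fmt.Println("plugin", manifest.Id, "uploaded and activated")
}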
+func (c *Client4) DeactivatePlugin(id string) (bool, *Response) { + if r, err := c.DoApiPost(c.GetPluginRoute(id)+"/deactivate", ""); err != nil { + return false, BuildErrorResponse(r, err) + } else { + defer closeBody(r) + return CheckStatusOK(r), BuildResponse(r) + } +} diff --git a/vendor/github.com/mattermost/platform/model/cluster_discovery.go b/vendor/github.com/mattermost/platform/model/cluster_discovery.go index 4b926965..89e5fc95 100644 --- a/vendor/github.com/mattermost/platform/model/cluster_discovery.go +++ b/vendor/github.com/mattermost/platform/model/cluster_discovery.go @@ -6,6 +6,7 @@ package model import ( "encoding/json" "io" + "net/http" "os" ) @@ -85,27 +86,27 @@ func FilterClusterDiscovery(vs []*ClusterDiscovery, f func(*ClusterDiscovery) bo func (o *ClusterDiscovery) IsValid() *AppError { if len(o.Id) != 26 { - return NewLocAppError("Channel.IsValid", "model.channel.is_valid.id.app_error", nil, "") + return NewAppError("Channel.IsValid", "model.channel.is_valid.id.app_error", nil, "", http.StatusBadRequest) } if len(o.ClusterName) == 0 { - return NewLocAppError("ClusterDiscovery.IsValid", "ClusterName must be set", nil, "") + return NewAppError("ClusterDiscovery.IsValid", "ClusterName must be set", nil, "", http.StatusBadRequest) } if len(o.Type) == 0 { - return NewLocAppError("ClusterDiscovery.IsValid", "Type must be set", nil, "") + return NewAppError("ClusterDiscovery.IsValid", "Type must be set", nil, "", http.StatusBadRequest) } if len(o.Hostname) == 0 { - return NewLocAppError("ClusterDiscovery.IsValid", "Hostname must be set", nil, "") + return NewAppError("ClusterDiscovery.IsValid", "Hostname must be set", nil, "", http.StatusBadRequest) } if o.CreateAt == 0 { - return NewLocAppError("ClusterDiscovery.IsValid", "CreateAt must be set", nil, "") + return NewAppError("ClusterDiscovery.IsValid", "CreateAt must be set", nil, "", http.StatusBadRequest) } if o.LastPingAt == 0 { - return NewLocAppError("ClusterDiscovery.IsValid", "LastPingAt must be set", nil, "") + return NewAppError("ClusterDiscovery.IsValid", "LastPingAt must be set", nil, "", http.StatusBadRequest) } return nil diff --git a/vendor/github.com/mattermost/platform/model/cluster_info.go b/vendor/github.com/mattermost/platform/model/cluster_info.go index 1e468044..c4f7e89a 100644 --- a/vendor/github.com/mattermost/platform/model/cluster_info.go +++ b/vendor/github.com/mattermost/platform/model/cluster_info.go @@ -10,6 +10,7 @@ import ( ) type ClusterInfo struct { + Id string `json:"id"` Version string `json:"version"` ConfigHash string `json:"config_hash"` IpAddress string `json:"ipaddress"` diff --git a/vendor/github.com/mattermost/platform/model/command.go b/vendor/github.com/mattermost/platform/model/command.go index 47378bbe..69da41c1 100644 --- a/vendor/github.com/mattermost/platform/model/command.go +++ b/vendor/github.com/mattermost/platform/model/command.go @@ -6,6 +6,7 @@ package model import ( "encoding/json" "io" + "net/http" "strings" ) @@ -79,51 +80,51 @@ func CommandListFromJson(data io.Reader) []*Command { func (o *Command) IsValid() *AppError { if len(o.Id) != 26 { - return NewLocAppError("Command.IsValid", "model.command.is_valid.id.app_error", nil, "") + return NewAppError("Command.IsValid", "model.command.is_valid.id.app_error", nil, "", http.StatusBadRequest) } if len(o.Token) != 26 { - return NewLocAppError("Command.IsValid", "model.command.is_valid.token.app_error", nil, "") + return NewAppError("Command.IsValid", "model.command.is_valid.token.app_error", nil, "", 
http.StatusBadRequest) } if o.CreateAt == 0 { - return NewLocAppError("Command.IsValid", "model.command.is_valid.create_at.app_error", nil, "") + return NewAppError("Command.IsValid", "model.command.is_valid.create_at.app_error", nil, "", http.StatusBadRequest) } if o.UpdateAt == 0 { - return NewLocAppError("Command.IsValid", "model.command.is_valid.update_at.app_error", nil, "") + return NewAppError("Command.IsValid", "model.command.is_valid.update_at.app_error", nil, "", http.StatusBadRequest) } if len(o.CreatorId) != 26 { - return NewLocAppError("Command.IsValid", "model.command.is_valid.user_id.app_error", nil, "") + return NewAppError("Command.IsValid", "model.command.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) } if len(o.TeamId) != 26 { - return NewLocAppError("Command.IsValid", "model.command.is_valid.team_id.app_error", nil, "") + return NewAppError("Command.IsValid", "model.command.is_valid.team_id.app_error", nil, "", http.StatusBadRequest) } if len(o.Trigger) < MIN_TRIGGER_LENGTH || len(o.Trigger) > MAX_TRIGGER_LENGTH || strings.Index(o.Trigger, "/") == 0 || strings.Contains(o.Trigger, " ") { - return NewLocAppError("Command.IsValid", "model.command.is_valid.trigger.app_error", nil, "") + return NewAppError("Command.IsValid", "model.command.is_valid.trigger.app_error", nil, "", http.StatusBadRequest) } if len(o.URL) == 0 || len(o.URL) > 1024 { - return NewLocAppError("Command.IsValid", "model.command.is_valid.url.app_error", nil, "") + return NewAppError("Command.IsValid", "model.command.is_valid.url.app_error", nil, "", http.StatusBadRequest) } if !IsValidHttpUrl(o.URL) { - return NewLocAppError("Command.IsValid", "model.command.is_valid.url_http.app_error", nil, "") + return NewAppError("Command.IsValid", "model.command.is_valid.url_http.app_error", nil, "", http.StatusBadRequest) } if !(o.Method == COMMAND_METHOD_GET || o.Method == COMMAND_METHOD_POST) { - return NewLocAppError("Command.IsValid", "model.command.is_valid.method.app_error", nil, "") + return NewAppError("Command.IsValid", "model.command.is_valid.method.app_error", nil, "", http.StatusBadRequest) } if len(o.DisplayName) > 64 { - return NewLocAppError("Command.IsValid", "model.command.is_valid.display_name.app_error", nil, "") + return NewAppError("Command.IsValid", "model.command.is_valid.display_name.app_error", nil, "", http.StatusBadRequest) } if len(o.Description) > 128 { - return NewLocAppError("Command.IsValid", "model.command.is_valid.description.app_error", nil, "") + return NewAppError("Command.IsValid", "model.command.is_valid.description.app_error", nil, "", http.StatusBadRequest) } return nil diff --git a/vendor/github.com/mattermost/platform/model/command_response.go b/vendor/github.com/mattermost/platform/model/command_response.go index 27d39e17..a3a171ce 100644 --- a/vendor/github.com/mattermost/platform/model/command_response.go +++ b/vendor/github.com/mattermost/platform/model/command_response.go @@ -6,6 +6,8 @@ package model import ( "encoding/json" "io" + "io/ioutil" + "strings" ) const ( @@ -18,6 +20,8 @@ type CommandResponse struct { Text string `json:"text"` Username string `json:"username"` IconURL string `json:"icon_url"` + Type string `json:"type"` + Props StringInterface `json:"props"` GotoLocation string `json:"goto_location"` Attachments []*SlackAttachment `json:"attachments"` } @@ -31,6 +35,22 @@ func (o *CommandResponse) ToJson() string { } } +func CommandResponseFromHTTPBody(contentType string, body io.Reader) *CommandResponse { + if 
strings.TrimSpace(strings.Split(contentType, ";")[0]) == "application/json" { + return CommandResponseFromJson(body) + } + if b, err := ioutil.ReadAll(body); err == nil { + return CommandResponseFromPlainText(string(b)) + } + return nil +} + +func CommandResponseFromPlainText(text string) *CommandResponse { + return &CommandResponse{ + Text: text, + } +} + func CommandResponseFromJson(data io.Reader) *CommandResponse { decoder := json.NewDecoder(data) var o CommandResponse @@ -39,8 +59,7 @@ func CommandResponseFromJson(data io.Reader) *CommandResponse { return nil } - o.Text = ExpandAnnouncement(o.Text) - o.Attachments = ProcessSlackAttachments(o.Attachments) + o.Attachments = StringifySlackFieldValue(o.Attachments) return &o } diff --git a/vendor/github.com/mattermost/platform/model/command_webhook.go b/vendor/github.com/mattermost/platform/model/command_webhook.go new file mode 100644 index 00000000..0b00e00b --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/command_webhook.go @@ -0,0 +1,65 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "net/http" +) + +type CommandWebhook struct { + Id string + CreateAt int64 + CommandId string + UserId string + ChannelId string + RootId string + ParentId string + UseCount int +} + +const ( + COMMAND_WEBHOOK_LIFETIME = 1000 * 60 * 30 +) + +func (o *CommandWebhook) PreSave() { + if o.Id == "" { + o.Id = NewId() + } + + if o.CreateAt == 0 { + o.CreateAt = GetMillis() + } +} + +func (o *CommandWebhook) IsValid() *AppError { + if len(o.Id) != 26 { + return NewAppError("CommandWebhook.IsValid", "model.command_hook.id.app_error", nil, "", http.StatusBadRequest) + } + + if o.CreateAt == 0 { + return NewAppError("CommandWebhook.IsValid", "model.command_hook.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) + } + + if len(o.CommandId) != 26 { + return NewAppError("CommandWebhook.IsValid", "model.command_hook.command_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.UserId) != 26 { + return NewAppError("CommandWebhook.IsValid", "model.command_hook.user_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.ChannelId) != 26 { + return NewAppError("CommandWebhook.IsValid", "model.command_hook.channel_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.RootId) != 0 && len(o.RootId) != 26 { + return NewAppError("CommandWebhook.IsValid", "model.command_hook.root_id.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.ParentId) != 0 && len(o.ParentId) != 26 { + return NewAppError("CommandWebhook.IsValid", "model.command_hook.parent_id.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} diff --git a/vendor/github.com/mattermost/platform/model/compliance.go b/vendor/github.com/mattermost/platform/model/compliance.go index 14f8f4a5..3134ddba 100644 --- a/vendor/github.com/mattermost/platform/model/compliance.go +++ b/vendor/github.com/mattermost/platform/model/compliance.go @@ -6,6 +6,7 @@ package model import ( "encoding/json" "io" + "net/http" "strings" ) @@ -75,27 +76,27 @@ func (me *Compliance) JobName() string { func (me *Compliance) IsValid() *AppError { if len(me.Id) != 26 { - return NewLocAppError("Compliance.IsValid", "model.compliance.is_valid.id.app_error", nil, "") + return NewAppError("Compliance.IsValid", "model.compliance.is_valid.id.app_error", nil, "", http.StatusBadRequest) } if me.CreateAt == 0 { - return NewLocAppError("Compliance.IsValid", 
"model.compliance.is_valid.create_at.app_error", nil, "") + return NewAppError("Compliance.IsValid", "model.compliance.is_valid.create_at.app_error", nil, "", http.StatusBadRequest) } if len(me.Desc) > 512 || len(me.Desc) == 0 { - return NewLocAppError("Compliance.IsValid", "model.compliance.is_valid.desc.app_error", nil, "") + return NewAppError("Compliance.IsValid", "model.compliance.is_valid.desc.app_error", nil, "", http.StatusBadRequest) } if me.StartAt == 0 { - return NewLocAppError("Compliance.IsValid", "model.compliance.is_valid.start_at.app_error", nil, "") + return NewAppError("Compliance.IsValid", "model.compliance.is_valid.start_at.app_error", nil, "", http.StatusBadRequest) } if me.EndAt == 0 { - return NewLocAppError("Compliance.IsValid", "model.compliance.is_valid.end_at.app_error", nil, "") + return NewAppError("Compliance.IsValid", "model.compliance.is_valid.end_at.app_error", nil, "", http.StatusBadRequest) } if me.EndAt <= me.StartAt { - return NewLocAppError("Compliance.IsValid", "model.compliance.is_valid.start_end_at.app_error", nil, "") + return NewAppError("Compliance.IsValid", "model.compliance.is_valid.start_end_at.app_error", nil, "", http.StatusBadRequest) } return nil diff --git a/vendor/github.com/mattermost/platform/model/config.go b/vendor/github.com/mattermost/platform/model/config.go index 1717d61a..fb34d1a0 100644 --- a/vendor/github.com/mattermost/platform/model/config.go +++ b/vendor/github.com/mattermost/platform/model/config.go @@ -8,6 +8,8 @@ import ( "io" "net/http" "net/url" + "strings" + "time" ) const ( @@ -22,6 +24,10 @@ const ( DATABASE_DRIVER_MYSQL = "mysql" DATABASE_DRIVER_POSTGRES = "postgres" + MINIO_ACCESS_KEY = "minioaccesskey" + MINIO_SECRET_KEY = "miniosecretkey" + MINIO_BUCKET = "mattermost-test" + PASSWORD_MAXIMUM_LENGTH = 64 PASSWORD_MINIMUM_LENGTH = 5 @@ -71,17 +77,22 @@ const ( SITENAME_MAX_LENGTH = 30 - SERVICE_SETTINGS_DEFAULT_SITE_URL = "" - SERVICE_SETTINGS_DEFAULT_TLS_CERT_FILE = "" - SERVICE_SETTINGS_DEFAULT_TLS_KEY_FILE = "" - SERVICE_SETTINGS_DEFAULT_READ_TIMEOUT = 300 - SERVICE_SETTINGS_DEFAULT_WRITE_TIMEOUT = 300 - SERVICE_SETTINGS_DEFAULT_ALLOW_CORS_FROM = "" + SERVICE_SETTINGS_DEFAULT_SITE_URL = "" + SERVICE_SETTINGS_DEFAULT_TLS_CERT_FILE = "" + SERVICE_SETTINGS_DEFAULT_TLS_KEY_FILE = "" + SERVICE_SETTINGS_DEFAULT_READ_TIMEOUT = 300 + SERVICE_SETTINGS_DEFAULT_WRITE_TIMEOUT = 300 + SERVICE_SETTINGS_DEFAULT_MAX_LOGIN_ATTEMPTS = 10 + SERVICE_SETTINGS_DEFAULT_ALLOW_CORS_FROM = "" + SERVICE_SETTINGS_DEFAULT_LISTEN_AND_ADDRESS = ":8065" + TEAM_SETTINGS_DEFAULT_MAX_USERS_PER_TEAM = 50 TEAM_SETTINGS_DEFAULT_CUSTOM_BRAND_TEXT = "" TEAM_SETTINGS_DEFAULT_CUSTOM_DESCRIPTION_TEXT = "" TEAM_SETTINGS_DEFAULT_USER_STATUS_AWAY_TIMEOUT = 300 + SQL_SETTINGS_DEFAULT_DATA_SOURCE = "mmuser:mostest@tcp(dockerhost:3306)/mattermost_test?charset=utf8mb4,utf8&readTimeout=30s&writeTimeout=30s" + EMAIL_SETTINGS_DEFAULT_FEEDBACK_ORGANIZATION = "" SUPPORT_SETTINGS_DEFAULT_TERMS_OF_SERVICE_LINK = "https://about.mattermost.com/default-terms/" @@ -123,17 +134,32 @@ const ( ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_COLOR = "#f2a93b" ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_TEXT_COLOR = "#333333" - ELASTICSEARCH_SETTINGS_DEFAULT_CONNECTION_URL = "" - ELASTICSEARCH_SETTINGS_DEFAULT_USERNAME = "" - ELASTICSEARCH_SETTINGS_DEFAULT_PASSWORD = "" - ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_REPLICAS = 1 - ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_SHARDS = 1 + TEAM_SETTINGS_DEFAULT_TEAM_TEXT = "default" + + ELASTICSEARCH_SETTINGS_DEFAULT_CONNECTION_URL = "" + 
ELASTICSEARCH_SETTINGS_DEFAULT_USERNAME = "" + ELASTICSEARCH_SETTINGS_DEFAULT_PASSWORD = "" + ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_REPLICAS = 1 + ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_SHARDS = 1 + ELASTICSEARCH_SETTINGS_DEFAULT_AGGREGATE_POSTS_AFTER_DAYS = 365 + ELASTICSEARCH_SETTINGS_DEFAULT_POSTS_AGGREGATOR_JOB_START_TIME = "03:00" + ELASTICSEARCH_SETTINGS_DEFAULT_INDEX_PREFIX = "" + ELASTICSEARCH_SETTINGS_DEFAULT_LIVE_INDEXING_BATCH_SIZE = 1 + ELASTICSEARCH_SETTINGS_DEFAULT_BULK_INDEXING_TIME_WINDOW_SECONDS = 3600 + ELASTICSEARCH_SETTINGS_DEFAULT_REQUEST_TIMEOUT_SECONDS = 30 + + DATA_RETENTION_SETTINGS_DEFAULT_MESSAGE_RETENTION_DAYS = 365 + DATA_RETENTION_SETTINGS_DEFAULT_FILE_RETENTION_DAYS = 365 + DATA_RETENTION_SETTINGS_DEFAULT_DELETION_JOB_START_TIME = "02:00" + + PLUGIN_SETTINGS_DEFAULT_DIRECTORY = "./plugins" + PLUGIN_SETTINGS_DEFAULT_CLIENT_DIRECTORY = "./client/plugins" ) type ServiceSettings struct { SiteURL *string LicenseFileLocation *string - ListenAddress string + ListenAddress *string ConnectionSecurity *string TLSCertFile *string TLSKeyFile *string @@ -142,7 +168,7 @@ type ServiceSettings struct { Forward80To443 *bool ReadTimeout *int WriteTimeout *int - MaximumLoginAttempts int + MaximumLoginAttempts *int GoroutineHealthThreshold *int GoogleDeveloperKey string EnableOAuthServiceProvider bool @@ -158,6 +184,7 @@ type ServiceSettings struct { EnableDeveloper *bool EnableSecurityFixAlert *bool EnableInsecureOutgoingConnections *bool + AllowedUntrustedInternalConnections *string EnableMultifactorAuthentication *bool EnforceMultifactorAuthentication *bool EnableUserAccessTokens *bool @@ -166,6 +193,7 @@ type ServiceSettings struct { SessionLengthMobileInDays *int SessionLengthSSOInDays *int SessionCacheInMinutes *int + SessionIdleTimeoutInMinutes *int WebsocketSecurePort *int WebsocketPort *int WebserverMode *string @@ -180,7 +208,211 @@ type ServiceSettings struct { EnableUserTypingMessages *bool EnableChannelViewedMessages *bool EnableUserStatuses *bool + ExperimentalEnableAuthenticationTransfer *bool ClusterLogTimeoutMilliseconds *int + CloseUnusedDirectMessages *bool + EnablePreviewFeatures *bool + EnableTutorial *bool +} + +func (s *ServiceSettings) SetDefaults() { + if s.SiteURL == nil { + s.SiteURL = NewString(SERVICE_SETTINGS_DEFAULT_SITE_URL) + } + + if s.LicenseFileLocation == nil { + s.LicenseFileLocation = NewString("") + } + + if s.ListenAddress == nil { + s.ListenAddress = NewString(SERVICE_SETTINGS_DEFAULT_LISTEN_AND_ADDRESS) + } + + if s.EnableAPIv3 == nil { + s.EnableAPIv3 = NewBool(true) + } + + if s.EnableLinkPreviews == nil { + s.EnableLinkPreviews = NewBool(false) + } + + if s.EnableDeveloper == nil { + s.EnableDeveloper = NewBool(false) + } + + if s.EnableSecurityFixAlert == nil { + s.EnableSecurityFixAlert = NewBool(true) + } + + if s.EnableInsecureOutgoingConnections == nil { + s.EnableInsecureOutgoingConnections = NewBool(false) + } + + if s.AllowedUntrustedInternalConnections == nil { + s.AllowedUntrustedInternalConnections = new(string) + } + + if s.EnableMultifactorAuthentication == nil { + s.EnableMultifactorAuthentication = NewBool(false) + } + + if s.EnforceMultifactorAuthentication == nil { + s.EnforceMultifactorAuthentication = NewBool(false) + } + + if s.EnableUserAccessTokens == nil { + s.EnableUserAccessTokens = NewBool(false) + } + + if s.GoroutineHealthThreshold == nil { + s.GoroutineHealthThreshold = NewInt(-1) + } + + if s.ConnectionSecurity == nil { + s.ConnectionSecurity = NewString("") + } + + if s.TLSKeyFile == nil { + 
s.TLSKeyFile = NewString(SERVICE_SETTINGS_DEFAULT_TLS_KEY_FILE) + } + + if s.TLSCertFile == nil { + s.TLSCertFile = NewString(SERVICE_SETTINGS_DEFAULT_TLS_CERT_FILE) + } + + if s.UseLetsEncrypt == nil { + s.UseLetsEncrypt = NewBool(false) + } + + if s.LetsEncryptCertificateCacheFile == nil { + s.LetsEncryptCertificateCacheFile = NewString("./config/letsencrypt.cache") + } + + if s.ReadTimeout == nil { + s.ReadTimeout = NewInt(SERVICE_SETTINGS_DEFAULT_READ_TIMEOUT) + } + + if s.WriteTimeout == nil { + s.WriteTimeout = NewInt(SERVICE_SETTINGS_DEFAULT_WRITE_TIMEOUT) + } + + if s.MaximumLoginAttempts == nil { + s.MaximumLoginAttempts = NewInt(SERVICE_SETTINGS_DEFAULT_MAX_LOGIN_ATTEMPTS) + } + + if s.Forward80To443 == nil { + s.Forward80To443 = NewBool(false) + } + + if s.TimeBetweenUserTypingUpdatesMilliseconds == nil { + s.TimeBetweenUserTypingUpdatesMilliseconds = NewInt64(5000) + } + + if s.EnablePostSearch == nil { + s.EnablePostSearch = NewBool(true) + } + + if s.EnableUserTypingMessages == nil { + s.EnableUserTypingMessages = NewBool(true) + } + + if s.EnableChannelViewedMessages == nil { + s.EnableChannelViewedMessages = NewBool(true) + } + + if s.EnableUserStatuses == nil { + s.EnableUserStatuses = NewBool(true) + } + + if s.ClusterLogTimeoutMilliseconds == nil { + s.ClusterLogTimeoutMilliseconds = NewInt(2000) + } + + if s.CloseUnusedDirectMessages == nil { + s.CloseUnusedDirectMessages = NewBool(false) + } + + if s.EnableTutorial == nil { + s.EnableTutorial = NewBool(true) + } + + if s.SessionLengthWebInDays == nil { + s.SessionLengthWebInDays = NewInt(30) + } + + if s.SessionLengthMobileInDays == nil { + s.SessionLengthMobileInDays = NewInt(30) + } + + if s.SessionLengthSSOInDays == nil { + s.SessionLengthSSOInDays = NewInt(30) + } + + if s.SessionCacheInMinutes == nil { + s.SessionCacheInMinutes = NewInt(10) + } + + if s.SessionIdleTimeoutInMinutes == nil { + s.SessionIdleTimeoutInMinutes = NewInt(0) + } + + if s.EnableCommands == nil { + s.EnableCommands = NewBool(false) + } + + if s.EnableOnlyAdminIntegrations == nil { + s.EnableOnlyAdminIntegrations = NewBool(true) + } + + if s.WebsocketPort == nil { + s.WebsocketPort = NewInt(80) + } + + if s.WebsocketSecurePort == nil { + s.WebsocketSecurePort = NewInt(443) + } + + if s.AllowCorsFrom == nil { + s.AllowCorsFrom = NewString(SERVICE_SETTINGS_DEFAULT_ALLOW_CORS_FROM) + } + + if s.WebserverMode == nil { + s.WebserverMode = NewString("gzip") + } else if *s.WebserverMode == "regular" { + *s.WebserverMode = "gzip" + } + + if s.EnableCustomEmoji == nil { + s.EnableCustomEmoji = NewBool(false) + } + + if s.EnableEmojiPicker == nil { + s.EnableEmojiPicker = NewBool(true) + } + + if s.RestrictCustomEmojiCreation == nil { + s.RestrictCustomEmojiCreation = NewString(RESTRICT_EMOJI_CREATION_ALL) + } + + if s.RestrictPostDelete == nil { + s.RestrictPostDelete = NewString(PERMISSIONS_DELETE_POST_ALL) + } + + if s.AllowEditPost == nil { + s.AllowEditPost = NewString(ALLOW_EDIT_POST_ALWAYS) + } + + if s.ExperimentalEnableAuthenticationTransfer == nil { + s.ExperimentalEnableAuthenticationTransfer = NewBool(true) + } + + if s.PostEditTimeLimit == nil { + s.PostEditTimeLimit = NewInt(300) + } + + if s.EnablePreviewFeatures == nil { + s.EnablePreviewFeatures = NewBool(true) + } } type ClusterSettings struct { @@ -194,16 +426,70 @@ type ClusterSettings struct { StreamingPort *int } +func (s *ClusterSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) + } + + if s.ClusterName == nil { + s.ClusterName = NewString("") + } + + if 
s.OverrideHostname == nil { + s.OverrideHostname = NewString("") + } + + if s.UseIpAddress == nil { + s.UseIpAddress = NewBool(true) + } + + if s.UseExperimentalGossip == nil { + s.UseExperimentalGossip = NewBool(false) + } + + if s.ReadOnlyConfig == nil { + s.ReadOnlyConfig = NewBool(true) + } + + if s.GossipPort == nil { + s.GossipPort = NewInt(8074) + } + + if s.StreamingPort == nil { + s.StreamingPort = NewInt(8075) + } +} + type MetricsSettings struct { Enable *bool BlockProfileRate *int ListenAddress *string } +func (s *MetricsSettings) SetDefaults() { + if s.ListenAddress == nil { + s.ListenAddress = NewString(":8067") + } + + if s.Enable == nil { + s.Enable = NewBool(false) + } + + if s.BlockProfileRate == nil { + s.BlockProfileRate = NewInt(0) + } +} + type AnalyticsSettings struct { MaxUsersForStatistics *int } +func (s *AnalyticsSettings) SetDefaults() { + if s.MaxUsersForStatistics == nil { + s.MaxUsersForStatistics = NewInt(ANALYTICS_SETTINGS_DEFAULT_MAX_USERS_FOR_STATISTICS) + } +} + type SSOSettings struct { Enable bool Secret string @@ -215,17 +501,43 @@ type SSOSettings struct { } type SqlSettings struct { - DriverName string - DataSource string + DriverName *string + DataSource *string DataSourceReplicas []string DataSourceSearchReplicas []string - MaxIdleConns int - MaxOpenConns int + MaxIdleConns *int + MaxOpenConns *int Trace bool AtRestEncryptKey string QueryTimeout *int } +func (s *SqlSettings) SetDefaults() { + if s.DriverName == nil { + s.DriverName = NewString(DATABASE_DRIVER_MYSQL) + } + + if s.DataSource == nil { + s.DataSource = NewString(SQL_SETTINGS_DEFAULT_DATA_SOURCE) + } + + if len(s.AtRestEncryptKey) == 0 { + s.AtRestEncryptKey = NewRandomString(32) + } + + if s.MaxIdleConns == nil { + s.MaxIdleConns = NewInt(20) + } + + if s.MaxOpenConns == nil { + s.MaxOpenConns = NewInt(300) + } + + if s.QueryTimeout == nil { + s.QueryTimeout = NewInt(30) + } +} + type LogSettings struct { EnableConsole bool ConsoleLevel string @@ -237,6 +549,12 @@ type LogSettings struct { EnableDiagnostics *bool } +func (s *LogSettings) SetDefaults() { + if s.EnableDiagnostics == nil { + s.EnableDiagnostics = NewBool(true) + } +} + type PasswordSettings struct { MinimumLength *int Lowercase *bool @@ -245,12 +563,34 @@ type PasswordSettings struct { Symbol *bool } +func (s *PasswordSettings) SetDefaults() { + if s.MinimumLength == nil { + s.MinimumLength = NewInt(PASSWORD_MINIMUM_LENGTH) + } + + if s.Lowercase == nil { + s.Lowercase = NewBool(false) + } + + if s.Number == nil { + s.Number = NewBool(false) + } + + if s.Uppercase == nil { + s.Uppercase = NewBool(false) + } + + if s.Symbol == nil { + s.Symbol = NewBool(false) + } +} + type FileSettings struct { EnableFileAttachments *bool EnableMobileUpload *bool EnableMobileDownload *bool MaxFileSize *int64 - DriverName string + DriverName *string Directory string EnablePublicLink bool PublicLinkSalt *string @@ -263,6 +603,64 @@ type FileSettings struct { AmazonS3SSL *bool AmazonS3SignV2 *bool AmazonS3SSE *bool + AmazonS3Trace *bool +} + +func (s *FileSettings) SetDefaults() { + if s.DriverName == nil { + s.DriverName = NewString(IMAGE_DRIVER_LOCAL) + } + + if s.AmazonS3Endpoint == "" { + // Defaults to "s3.amazonaws.com" + s.AmazonS3Endpoint = "s3.amazonaws.com" + } + + if s.AmazonS3SSL == nil { + s.AmazonS3SSL = NewBool(true) // Secure by default. + } + + if s.AmazonS3SignV2 == nil { + s.AmazonS3SignV2 = new(bool) + // Signature v2 is not enabled by default. 
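Every SetDefaults method introduced in this file relies on the same idiom: optional settings are pointer fields, a nil pointer means "not set in the config file", and a default is assigned only when the field is nil, via the model package's NewBool/NewString/NewInt helpers. A stripped-down sketch of the idiom; ExampleSettings and the lower-case helpers are illustrative and not part of the vendored code:

package example

// Tiny pointer helpers in the style of model.NewBool / model.NewString / model.NewInt.
func newBool(b bool) *bool       { return &b }
func newString(s string) *string { return &s }
func newInt(n int) *int          { return &n }

// ExampleSettings is a hypothetical config section used only to illustrate the idiom.
type ExampleSettings struct {
    Enable        *bool
    ListenAddress *string
    MaxConns      *int
}

// SetDefaults fills in any field that was absent from the loaded config;
// values that were explicitly set (non-nil) are left untouched.
func (s *ExampleSettings) SetDefaults() {
    if s.Enable == nil {
        s.Enable = newBool(false)
    }
    if s.ListenAddress == nil {
        s.ListenAddress = newString(":8067")
    }
    if s.MaxConns == nil {
        s.MaxConns = newInt(300)
    }
}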
+ } + + if s.AmazonS3SSE == nil { + s.AmazonS3SSE = NewBool(false) // Not Encrypted by default. + } + + if s.AmazonS3Trace == nil { + s.AmazonS3Trace = NewBool(false) + } + + if s.EnableFileAttachments == nil { + s.EnableFileAttachments = NewBool(true) + } + + if s.EnableMobileUpload == nil { + s.EnableMobileUpload = NewBool(true) + } + + if s.EnableMobileDownload == nil { + s.EnableMobileDownload = NewBool(true) + } + + if s.MaxFileSize == nil { + s.MaxFileSize = NewInt64(52428800) // 50 MB + } + + if s.PublicLinkSalt == nil || len(*s.PublicLinkSalt) == 0 { + s.PublicLinkSalt = NewString(NewRandomString(32)) + } + + if s.InitialFont == "" { + // Defaults to "luximbi.ttf" + s.InitialFont = "luximbi.ttf" + } + + if s.Directory == "" { + s.Directory = "./data/" + } } type EmailSettings struct { @@ -270,6 +668,7 @@ type EmailSettings struct { EnableSignInWithEmail *bool EnableSignInWithUsername *bool SendEmailNotifications bool + UseChannelInEmailNotifications *bool RequireEmailVerification bool FeedbackName string FeedbackEmail string @@ -289,1412 +688,1441 @@ type EmailSettings struct { EmailBatchingInterval *int SkipServerCertificateVerification *bool EmailNotificationContentsType *string + LoginButtonColor *string + LoginButtonBorderColor *string + LoginButtonTextColor *string } -type RateLimitSettings struct { - Enable *bool - PerSec int - MaxBurst *int - MemoryStoreSize int - VaryByRemoteAddr bool - VaryByHeader string -} - -type PrivacySettings struct { - ShowEmailAddress bool - ShowFullName bool -} - -type SupportSettings struct { - TermsOfServiceLink *string - PrivacyPolicyLink *string - AboutLink *string - HelpLink *string - ReportAProblemLink *string - AdministratorsGuideLink *string - TroubleshootingForumLink *string - CommercialSupportLink *string - SupportEmail *string -} - -type AnnouncementSettings struct { - EnableBanner *bool - BannerText *string - BannerColor *string - BannerTextColor *string - AllowBannerDismissal *bool -} - -type TeamSettings struct { - SiteName string - MaxUsersPerTeam int - EnableTeamCreation bool - EnableUserCreation bool - EnableOpenServer *bool - RestrictCreationToDomains string - EnableCustomBrand *bool - CustomBrandText *string - CustomDescriptionText *string - RestrictDirectMessage *string - RestrictTeamInvite *string - RestrictPublicChannelManagement *string - RestrictPrivateChannelManagement *string - RestrictPublicChannelCreation *string - RestrictPrivateChannelCreation *string - RestrictPublicChannelDeletion *string - RestrictPrivateChannelDeletion *string - RestrictPrivateChannelManageMembers *string - UserStatusAwayTimeout *int64 - MaxChannelsPerTeam *int64 - MaxNotificationsPerChannel *int64 - TeammateNameDisplay *string -} - -type LdapSettings struct { - // Basic - Enable *bool - LdapServer *string - LdapPort *int - ConnectionSecurity *string - BaseDN *string - BindUsername *string - BindPassword *string - - // Filtering - UserFilter *string - - // User Mapping - FirstNameAttribute *string - LastNameAttribute *string - EmailAttribute *string - UsernameAttribute *string - NicknameAttribute *string - IdAttribute *string - PositionAttribute *string - - // Syncronization - SyncIntervalMinutes *int - - // Advanced - SkipCertificateVerification *bool - QueryTimeout *int - MaxPageSize *int - - // Customization - LoginFieldName *string -} - -type ComplianceSettings struct { - Enable *bool - Directory *string - EnableDaily *bool -} +func (s *EmailSettings) SetDefaults() { + if len(s.InviteSalt) == 0 { + s.InviteSalt = NewRandomString(32) + } -type 
LocalizationSettings struct { - DefaultServerLocale *string - DefaultClientLocale *string - AvailableLocales *string -} + if s.EnableSignInWithEmail == nil { + s.EnableSignInWithEmail = NewBool(s.EnableSignUpWithEmail) + } -type SamlSettings struct { - // Basic - Enable *bool - Verify *bool - Encrypt *bool + if s.EnableSignInWithUsername == nil { + s.EnableSignInWithUsername = NewBool(false) + } - IdpUrl *string - IdpDescriptorUrl *string - AssertionConsumerServiceURL *string + if s.UseChannelInEmailNotifications == nil { + s.UseChannelInEmailNotifications = NewBool(false) + } - IdpCertificateFile *string - PublicCertificateFile *string - PrivateKeyFile *string + if s.SendPushNotifications == nil { + s.SendPushNotifications = NewBool(false) + } - // User Mapping - FirstNameAttribute *string - LastNameAttribute *string - EmailAttribute *string - UsernameAttribute *string - NicknameAttribute *string - LocaleAttribute *string - PositionAttribute *string + if s.PushNotificationServer == nil { + s.PushNotificationServer = NewString("") + } - LoginButtonText *string -} + if s.PushNotificationContents == nil { + s.PushNotificationContents = NewString(GENERIC_NOTIFICATION) + } -type NativeAppSettings struct { - AppDownloadLink *string - AndroidAppDownloadLink *string - IosAppDownloadLink *string -} + if s.FeedbackOrganization == nil { + s.FeedbackOrganization = NewString(EMAIL_SETTINGS_DEFAULT_FEEDBACK_ORGANIZATION) + } -type WebrtcSettings struct { - Enable *bool - GatewayWebsocketUrl *string - GatewayAdminUrl *string - GatewayAdminSecret *string - StunURI *string - TurnURI *string - TurnUsername *string - TurnSharedKey *string -} + if s.EnableEmailBatching == nil { + s.EnableEmailBatching = NewBool(false) + } -type ElasticsearchSettings struct { - ConnectionUrl *string - Username *string - Password *string - EnableIndexing *bool - EnableSearching *bool - Sniff *bool - PostIndexReplicas *int - PostIndexShards *int -} + if s.EmailBatchingBufferSize == nil { + s.EmailBatchingBufferSize = NewInt(EMAIL_BATCHING_BUFFER_SIZE) + } -type DataRetentionSettings struct { - Enable *bool -} + if s.EmailBatchingInterval == nil { + s.EmailBatchingInterval = NewInt(EMAIL_BATCHING_INTERVAL) + } -type JobSettings struct { - RunJobs *bool - RunScheduler *bool -} + if s.EnableSMTPAuth == nil { + s.EnableSMTPAuth = new(bool) + if s.ConnectionSecurity == CONN_SECURITY_NONE { + *s.EnableSMTPAuth = false + } else { + *s.EnableSMTPAuth = true + } + } -type PluginSettings struct { - Plugins map[string]interface{} -} + if s.ConnectionSecurity == CONN_SECURITY_PLAIN { + s.ConnectionSecurity = CONN_SECURITY_NONE + } -type Config struct { - ServiceSettings ServiceSettings - TeamSettings TeamSettings - SqlSettings SqlSettings - LogSettings LogSettings - PasswordSettings PasswordSettings - FileSettings FileSettings - EmailSettings EmailSettings - RateLimitSettings RateLimitSettings - PrivacySettings PrivacySettings - SupportSettings SupportSettings - AnnouncementSettings AnnouncementSettings - GitLabSettings SSOSettings - GoogleSettings SSOSettings - Office365Settings SSOSettings - LdapSettings LdapSettings - ComplianceSettings ComplianceSettings - LocalizationSettings LocalizationSettings - SamlSettings SamlSettings - NativeAppSettings NativeAppSettings - ClusterSettings ClusterSettings - MetricsSettings MetricsSettings - AnalyticsSettings AnalyticsSettings - WebrtcSettings WebrtcSettings - ElasticsearchSettings ElasticsearchSettings - DataRetentionSettings DataRetentionSettings - JobSettings JobSettings - PluginSettings 
PluginSettings -} + if s.SkipServerCertificateVerification == nil { + s.SkipServerCertificateVerification = NewBool(false) + } -func (o *Config) ToJson() string { - b, err := json.Marshal(o) - if err != nil { - return "" - } else { - return string(b) + if s.EmailNotificationContentsType == nil { + s.EmailNotificationContentsType = NewString(EMAIL_NOTIFICATION_CONTENTS_FULL) } -} -func (o *Config) GetSSOService(service string) *SSOSettings { - switch service { - case SERVICE_GITLAB: - return &o.GitLabSettings - case SERVICE_GOOGLE: - return &o.GoogleSettings - case SERVICE_OFFICE365: - return &o.Office365Settings + if s.LoginButtonColor == nil { + s.LoginButtonColor = NewString("#0000") } - return nil -} + if s.LoginButtonBorderColor == nil { + s.LoginButtonBorderColor = NewString("#2389D7") + } -func ConfigFromJson(data io.Reader) *Config { - decoder := json.NewDecoder(data) - var o Config - err := decoder.Decode(&o) - if err == nil { - return &o - } else { - return nil + if s.LoginButtonTextColor == nil { + s.LoginButtonTextColor = NewString("#2389D7") } } -func (o *Config) SetDefaults() { +type RateLimitSettings struct { + Enable *bool + PerSec *int + MaxBurst *int + MemoryStoreSize *int + VaryByRemoteAddr bool + VaryByHeader string +} - if len(o.SqlSettings.AtRestEncryptKey) == 0 { - o.SqlSettings.AtRestEncryptKey = NewRandomString(32) +func (s *RateLimitSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) } - if o.SqlSettings.QueryTimeout == nil { - o.SqlSettings.QueryTimeout = new(int) - *o.SqlSettings.QueryTimeout = 30 + if s.PerSec == nil { + s.PerSec = NewInt(10) } - if o.FileSettings.AmazonS3Endpoint == "" { - // Defaults to "s3.amazonaws.com" - o.FileSettings.AmazonS3Endpoint = "s3.amazonaws.com" + if s.MaxBurst == nil { + s.MaxBurst = NewInt(100) } - if o.FileSettings.AmazonS3SSL == nil { - o.FileSettings.AmazonS3SSL = new(bool) - *o.FileSettings.AmazonS3SSL = true // Secure by default. + if s.MemoryStoreSize == nil { + s.MemoryStoreSize = NewInt(10000) } +} - if o.FileSettings.AmazonS3SignV2 == nil { - o.FileSettings.AmazonS3SignV2 = new(bool) - // Signature v2 is not enabled by default. - } +type PrivacySettings struct { + ShowEmailAddress bool + ShowFullName bool +} - if o.FileSettings.AmazonS3SSE == nil { - o.FileSettings.AmazonS3SSE = new(bool) - *o.FileSettings.AmazonS3SSE = false // Not Encrypted by default. 
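The removed lines above and below are pieces of the old monolithic Config.SetDefaults; their logic moves into the per-section SetDefaults methods added in this hunk. The top-level config presumably just fans out to the sections once they own their defaults; the full delegation is outside this excerpt, but the shape is roughly as follows (miniConfig is illustrative, while EmailSettings, RateLimitSettings and their SetDefaults methods are the ones from the diff):

package example

import "github.com/mattermost/platform/model"

// miniConfig is an illustrative subset of the real Config struct; both embedded
// sections gained SetDefaults methods in the hunks above.
type miniConfig struct {
    EmailSettings     model.EmailSettings
    RateLimitSettings model.RateLimitSettings
}

// SetDefaults fans out to the per-section defaults, replacing the single large
// Config.SetDefaults body that this hunk removes.
func (c *miniConfig) SetDefaults() {
    c.EmailSettings.SetDefaults()
    c.RateLimitSettings.SetDefaults()
}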
- } +type SupportSettings struct { + TermsOfServiceLink *string + PrivacyPolicyLink *string + AboutLink *string + HelpLink *string + ReportAProblemLink *string + SupportEmail *string +} - if o.FileSettings.EnableFileAttachments == nil { - o.FileSettings.EnableFileAttachments = new(bool) - *o.FileSettings.EnableFileAttachments = true +func (s *SupportSettings) SetDefaults() { + if !IsSafeLink(s.TermsOfServiceLink) { + *s.TermsOfServiceLink = SUPPORT_SETTINGS_DEFAULT_TERMS_OF_SERVICE_LINK } - if o.FileSettings.EnableMobileUpload == nil { - o.FileSettings.EnableMobileUpload = new(bool) - *o.FileSettings.EnableMobileUpload = true + if s.TermsOfServiceLink == nil { + s.TermsOfServiceLink = NewString(SUPPORT_SETTINGS_DEFAULT_TERMS_OF_SERVICE_LINK) } - if o.FileSettings.EnableMobileDownload == nil { - o.FileSettings.EnableMobileDownload = new(bool) - *o.FileSettings.EnableMobileDownload = true + if !IsSafeLink(s.PrivacyPolicyLink) { + *s.PrivacyPolicyLink = "" } - if o.FileSettings.MaxFileSize == nil { - o.FileSettings.MaxFileSize = new(int64) - *o.FileSettings.MaxFileSize = 52428800 // 50 MB + if s.PrivacyPolicyLink == nil { + s.PrivacyPolicyLink = NewString(SUPPORT_SETTINGS_DEFAULT_PRIVACY_POLICY_LINK) } - if o.FileSettings.PublicLinkSalt == nil || len(*o.FileSettings.PublicLinkSalt) == 0 { - o.FileSettings.PublicLinkSalt = new(string) - *o.FileSettings.PublicLinkSalt = NewRandomString(32) + if !IsSafeLink(s.AboutLink) { + *s.AboutLink = "" } - if o.FileSettings.InitialFont == "" { - // Defaults to "luximbi.ttf" - o.FileSettings.InitialFont = "luximbi.ttf" + if s.AboutLink == nil { + s.AboutLink = NewString(SUPPORT_SETTINGS_DEFAULT_ABOUT_LINK) } - if o.FileSettings.Directory == "" { - o.FileSettings.Directory = "./data/" + if !IsSafeLink(s.HelpLink) { + *s.HelpLink = "" } - if len(o.EmailSettings.InviteSalt) == 0 { - o.EmailSettings.InviteSalt = NewRandomString(32) + if s.HelpLink == nil { + s.HelpLink = NewString(SUPPORT_SETTINGS_DEFAULT_HELP_LINK) } - if o.ServiceSettings.SiteURL == nil { - o.ServiceSettings.SiteURL = new(string) - *o.ServiceSettings.SiteURL = SERVICE_SETTINGS_DEFAULT_SITE_URL + if !IsSafeLink(s.ReportAProblemLink) { + *s.ReportAProblemLink = "" } - if o.ServiceSettings.LicenseFileLocation == nil { - o.ServiceSettings.LicenseFileLocation = new(string) + if s.ReportAProblemLink == nil { + s.ReportAProblemLink = NewString(SUPPORT_SETTINGS_DEFAULT_REPORT_A_PROBLEM_LINK) } - if o.ServiceSettings.EnableAPIv3 == nil { - o.ServiceSettings.EnableAPIv3 = new(bool) - *o.ServiceSettings.EnableAPIv3 = true + if s.SupportEmail == nil { + s.SupportEmail = NewString(SUPPORT_SETTINGS_DEFAULT_SUPPORT_EMAIL) } +} - if o.ServiceSettings.EnableLinkPreviews == nil { - o.ServiceSettings.EnableLinkPreviews = new(bool) - *o.ServiceSettings.EnableLinkPreviews = false - } +type AnnouncementSettings struct { + EnableBanner *bool + BannerText *string + BannerColor *string + BannerTextColor *string + AllowBannerDismissal *bool +} - if o.ServiceSettings.EnableDeveloper == nil { - o.ServiceSettings.EnableDeveloper = new(bool) - *o.ServiceSettings.EnableDeveloper = false +func (s *AnnouncementSettings) SetDefaults() { + if s.EnableBanner == nil { + s.EnableBanner = NewBool(false) } - if o.ServiceSettings.EnableSecurityFixAlert == nil { - o.ServiceSettings.EnableSecurityFixAlert = new(bool) - *o.ServiceSettings.EnableSecurityFixAlert = true + if s.BannerText == nil { + s.BannerText = NewString("") } - if o.ServiceSettings.EnableInsecureOutgoingConnections == nil { - 
o.ServiceSettings.EnableInsecureOutgoingConnections = new(bool) - *o.ServiceSettings.EnableInsecureOutgoingConnections = false + if s.BannerColor == nil { + s.BannerColor = NewString(ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_COLOR) } - if o.ServiceSettings.EnableMultifactorAuthentication == nil { - o.ServiceSettings.EnableMultifactorAuthentication = new(bool) - *o.ServiceSettings.EnableMultifactorAuthentication = false + if s.BannerTextColor == nil { + s.BannerTextColor = NewString(ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_TEXT_COLOR) } - if o.ServiceSettings.EnforceMultifactorAuthentication == nil { - o.ServiceSettings.EnforceMultifactorAuthentication = new(bool) - *o.ServiceSettings.EnforceMultifactorAuthentication = false + if s.AllowBannerDismissal == nil { + s.AllowBannerDismissal = NewBool(true) } +} - if o.ServiceSettings.EnableUserAccessTokens == nil { - o.ServiceSettings.EnableUserAccessTokens = new(bool) - *o.ServiceSettings.EnableUserAccessTokens = false - } +type ThemeSettings struct { + EnableThemeSelection *bool + DefaultTheme *string + AllowCustomThemes *bool + AllowedThemes []string +} - if o.PasswordSettings.MinimumLength == nil { - o.PasswordSettings.MinimumLength = new(int) - *o.PasswordSettings.MinimumLength = PASSWORD_MINIMUM_LENGTH +func (s *ThemeSettings) SetDefaults() { + if s.EnableThemeSelection == nil { + s.EnableThemeSelection = NewBool(true) } - if o.PasswordSettings.Lowercase == nil { - o.PasswordSettings.Lowercase = new(bool) - *o.PasswordSettings.Lowercase = false + if s.DefaultTheme == nil { + s.DefaultTheme = NewString(TEAM_SETTINGS_DEFAULT_TEAM_TEXT) } - if o.PasswordSettings.Number == nil { - o.PasswordSettings.Number = new(bool) - *o.PasswordSettings.Number = false + if s.AllowCustomThemes == nil { + s.AllowCustomThemes = NewBool(true) } - if o.PasswordSettings.Uppercase == nil { - o.PasswordSettings.Uppercase = new(bool) - *o.PasswordSettings.Uppercase = false + if s.AllowedThemes == nil { + s.AllowedThemes = []string{} } +} + +type TeamSettings struct { + SiteName string + MaxUsersPerTeam *int + EnableTeamCreation bool + EnableUserCreation bool + EnableOpenServer *bool + RestrictCreationToDomains string + EnableCustomBrand *bool + CustomBrandText *string + CustomDescriptionText *string + RestrictDirectMessage *string + RestrictTeamInvite *string + RestrictPublicChannelManagement *string + RestrictPrivateChannelManagement *string + RestrictPublicChannelCreation *string + RestrictPrivateChannelCreation *string + RestrictPublicChannelDeletion *string + RestrictPrivateChannelDeletion *string + RestrictPrivateChannelManageMembers *string + EnableXToLeaveChannelsFromLHS *bool + UserStatusAwayTimeout *int64 + MaxChannelsPerTeam *int64 + MaxNotificationsPerChannel *int64 + EnableConfirmNotificationsToChannel *bool + TeammateNameDisplay *string + ExperimentalTownSquareIsReadOnly *bool + ExperimentalPrimaryTeam *string +} - if o.PasswordSettings.Symbol == nil { - o.PasswordSettings.Symbol = new(bool) - *o.PasswordSettings.Symbol = false +func (s *TeamSettings) SetDefaults() { + if s.MaxUsersPerTeam == nil { + s.MaxUsersPerTeam = NewInt(TEAM_SETTINGS_DEFAULT_MAX_USERS_PER_TEAM) } - if o.TeamSettings.EnableCustomBrand == nil { - o.TeamSettings.EnableCustomBrand = new(bool) - *o.TeamSettings.EnableCustomBrand = false + if s.EnableCustomBrand == nil { + s.EnableCustomBrand = NewBool(false) } - if o.TeamSettings.CustomBrandText == nil { - o.TeamSettings.CustomBrandText = new(string) - *o.TeamSettings.CustomBrandText = TEAM_SETTINGS_DEFAULT_CUSTOM_BRAND_TEXT + if 
s.CustomBrandText == nil { + s.CustomBrandText = NewString(TEAM_SETTINGS_DEFAULT_CUSTOM_BRAND_TEXT) } - if o.TeamSettings.CustomDescriptionText == nil { - o.TeamSettings.CustomDescriptionText = new(string) - *o.TeamSettings.CustomDescriptionText = TEAM_SETTINGS_DEFAULT_CUSTOM_DESCRIPTION_TEXT + if s.CustomDescriptionText == nil { + s.CustomDescriptionText = NewString(TEAM_SETTINGS_DEFAULT_CUSTOM_DESCRIPTION_TEXT) } - if o.TeamSettings.EnableOpenServer == nil { - o.TeamSettings.EnableOpenServer = new(bool) - *o.TeamSettings.EnableOpenServer = false + if s.EnableOpenServer == nil { + s.EnableOpenServer = NewBool(false) } - if o.TeamSettings.RestrictDirectMessage == nil { - o.TeamSettings.RestrictDirectMessage = new(string) - *o.TeamSettings.RestrictDirectMessage = DIRECT_MESSAGE_ANY + if s.RestrictDirectMessage == nil { + s.RestrictDirectMessage = NewString(DIRECT_MESSAGE_ANY) } - if o.TeamSettings.RestrictTeamInvite == nil { - o.TeamSettings.RestrictTeamInvite = new(string) - *o.TeamSettings.RestrictTeamInvite = PERMISSIONS_ALL + if s.RestrictTeamInvite == nil { + s.RestrictTeamInvite = NewString(PERMISSIONS_ALL) } - if o.TeamSettings.RestrictPublicChannelManagement == nil { - o.TeamSettings.RestrictPublicChannelManagement = new(string) - *o.TeamSettings.RestrictPublicChannelManagement = PERMISSIONS_ALL + if s.RestrictPublicChannelManagement == nil { + s.RestrictPublicChannelManagement = NewString(PERMISSIONS_ALL) } - if o.TeamSettings.RestrictPrivateChannelManagement == nil { - o.TeamSettings.RestrictPrivateChannelManagement = new(string) - *o.TeamSettings.RestrictPrivateChannelManagement = PERMISSIONS_ALL + if s.RestrictPrivateChannelManagement == nil { + s.RestrictPrivateChannelManagement = NewString(PERMISSIONS_ALL) } - if o.TeamSettings.RestrictPublicChannelCreation == nil { - o.TeamSettings.RestrictPublicChannelCreation = new(string) + if s.RestrictPublicChannelCreation == nil { + s.RestrictPublicChannelCreation = new(string) // If this setting does not exist, assume migration from <3.6, so use management setting as default. - if *o.TeamSettings.RestrictPublicChannelManagement == PERMISSIONS_CHANNEL_ADMIN { - *o.TeamSettings.RestrictPublicChannelCreation = PERMISSIONS_TEAM_ADMIN + if *s.RestrictPublicChannelManagement == PERMISSIONS_CHANNEL_ADMIN { + *s.RestrictPublicChannelCreation = PERMISSIONS_TEAM_ADMIN } else { - *o.TeamSettings.RestrictPublicChannelCreation = *o.TeamSettings.RestrictPublicChannelManagement + *s.RestrictPublicChannelCreation = *s.RestrictPublicChannelManagement } } - if o.TeamSettings.RestrictPrivateChannelCreation == nil { - o.TeamSettings.RestrictPrivateChannelCreation = new(string) + if s.RestrictPrivateChannelCreation == nil { // If this setting does not exist, assume migration from <3.6, so use management setting as default. 
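The RestrictPublicChannelCreation / RestrictPrivateChannelCreation defaults above encode a migration rule: if the creation setting is absent, assume an upgrade from a pre-3.6 config and derive it from the corresponding management setting. A standalone sketch of that rule, using placeholder permission strings in place of the PERMISSIONS_* constants:

package main

import "fmt"

func newString(s string) *string { return &s } // mirrors the NewString helper used in the diff

func main() {
	// If the newer creation setting is nil, derive it from the older
	// management setting. The literal permission strings here are
	// placeholders for the real PERMISSIONS_CHANNEL_ADMIN / PERMISSIONS_TEAM_ADMIN values.
	management := newString("channel_admin")
	var creation *string

	if creation == nil {
		if *management == "channel_admin" {
			creation = newString("team_admin")
		} else {
			creation = newString(*management)
		}
	}
	fmt.Println(*creation) // team_admin
}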
- if *o.TeamSettings.RestrictPrivateChannelManagement == PERMISSIONS_CHANNEL_ADMIN { - *o.TeamSettings.RestrictPrivateChannelCreation = PERMISSIONS_TEAM_ADMIN + if *s.RestrictPrivateChannelManagement == PERMISSIONS_CHANNEL_ADMIN { + s.RestrictPrivateChannelCreation = NewString(PERMISSIONS_TEAM_ADMIN) } else { - *o.TeamSettings.RestrictPrivateChannelCreation = *o.TeamSettings.RestrictPrivateChannelManagement + s.RestrictPrivateChannelCreation = NewString(*s.RestrictPrivateChannelManagement) } } - if o.TeamSettings.RestrictPublicChannelDeletion == nil { - o.TeamSettings.RestrictPublicChannelDeletion = new(string) + if s.RestrictPublicChannelDeletion == nil { // If this setting does not exist, assume migration from <3.6, so use management setting as default. - *o.TeamSettings.RestrictPublicChannelDeletion = *o.TeamSettings.RestrictPublicChannelManagement + s.RestrictPublicChannelDeletion = NewString(*s.RestrictPublicChannelManagement) } - if o.TeamSettings.RestrictPrivateChannelDeletion == nil { - o.TeamSettings.RestrictPrivateChannelDeletion = new(string) + if s.RestrictPrivateChannelDeletion == nil { // If this setting does not exist, assume migration from <3.6, so use management setting as default. - *o.TeamSettings.RestrictPrivateChannelDeletion = *o.TeamSettings.RestrictPrivateChannelManagement + s.RestrictPrivateChannelDeletion = NewString(*s.RestrictPrivateChannelManagement) } - if o.TeamSettings.RestrictPrivateChannelManageMembers == nil { - o.TeamSettings.RestrictPrivateChannelManageMembers = new(string) - *o.TeamSettings.RestrictPrivateChannelManageMembers = PERMISSIONS_ALL + if s.RestrictPrivateChannelManageMembers == nil { + s.RestrictPrivateChannelManageMembers = NewString(PERMISSIONS_ALL) } - if o.TeamSettings.UserStatusAwayTimeout == nil { - o.TeamSettings.UserStatusAwayTimeout = new(int64) - *o.TeamSettings.UserStatusAwayTimeout = TEAM_SETTINGS_DEFAULT_USER_STATUS_AWAY_TIMEOUT + if s.EnableXToLeaveChannelsFromLHS == nil { + s.EnableXToLeaveChannelsFromLHS = NewBool(false) } - if o.TeamSettings.MaxChannelsPerTeam == nil { - o.TeamSettings.MaxChannelsPerTeam = new(int64) - *o.TeamSettings.MaxChannelsPerTeam = 2000 + if s.UserStatusAwayTimeout == nil { + s.UserStatusAwayTimeout = NewInt64(TEAM_SETTINGS_DEFAULT_USER_STATUS_AWAY_TIMEOUT) } - if o.TeamSettings.MaxNotificationsPerChannel == nil { - o.TeamSettings.MaxNotificationsPerChannel = new(int64) - *o.TeamSettings.MaxNotificationsPerChannel = 1000 + if s.MaxChannelsPerTeam == nil { + s.MaxChannelsPerTeam = NewInt64(2000) } - if o.EmailSettings.EnableSignInWithEmail == nil { - o.EmailSettings.EnableSignInWithEmail = new(bool) - - if o.EmailSettings.EnableSignUpWithEmail == true { - *o.EmailSettings.EnableSignInWithEmail = true - } else { - *o.EmailSettings.EnableSignInWithEmail = false - } + if s.MaxNotificationsPerChannel == nil { + s.MaxNotificationsPerChannel = NewInt64(1000) } - if o.EmailSettings.EnableSignInWithUsername == nil { - o.EmailSettings.EnableSignInWithUsername = new(bool) - *o.EmailSettings.EnableSignInWithUsername = false + if s.EnableConfirmNotificationsToChannel == nil { + s.EnableConfirmNotificationsToChannel = NewBool(true) } - if o.EmailSettings.SendPushNotifications == nil { - o.EmailSettings.SendPushNotifications = new(bool) - *o.EmailSettings.SendPushNotifications = false + if s.ExperimentalTownSquareIsReadOnly == nil { + s.ExperimentalTownSquareIsReadOnly = NewBool(false) } - if o.EmailSettings.PushNotificationServer == nil { - o.EmailSettings.PushNotificationServer = new(string) - 
*o.EmailSettings.PushNotificationServer = "" + if s.ExperimentalPrimaryTeam == nil { + s.ExperimentalPrimaryTeam = NewString("") } +} - if o.EmailSettings.PushNotificationContents == nil { - o.EmailSettings.PushNotificationContents = new(string) - *o.EmailSettings.PushNotificationContents = GENERIC_NOTIFICATION - } +type ClientRequirements struct { + AndroidLatestVersion string + AndroidMinVersion string + DesktopLatestVersion string + DesktopMinVersion string + IosLatestVersion string + IosMinVersion string +} - if o.EmailSettings.FeedbackOrganization == nil { - o.EmailSettings.FeedbackOrganization = new(string) - *o.EmailSettings.FeedbackOrganization = EMAIL_SETTINGS_DEFAULT_FEEDBACK_ORGANIZATION - } +type LdapSettings struct { + // Basic + Enable *bool + EnableSync *bool + LdapServer *string + LdapPort *int + ConnectionSecurity *string + BaseDN *string + BindUsername *string + BindPassword *string - if o.EmailSettings.EnableEmailBatching == nil { - o.EmailSettings.EnableEmailBatching = new(bool) - *o.EmailSettings.EnableEmailBatching = false - } + // Filtering + UserFilter *string - if o.EmailSettings.EmailBatchingBufferSize == nil { - o.EmailSettings.EmailBatchingBufferSize = new(int) - *o.EmailSettings.EmailBatchingBufferSize = EMAIL_BATCHING_BUFFER_SIZE - } + // User Mapping + FirstNameAttribute *string + LastNameAttribute *string + EmailAttribute *string + UsernameAttribute *string + NicknameAttribute *string + IdAttribute *string + PositionAttribute *string - if o.EmailSettings.EmailBatchingInterval == nil { - o.EmailSettings.EmailBatchingInterval = new(int) - *o.EmailSettings.EmailBatchingInterval = EMAIL_BATCHING_INTERVAL - } + // Syncronization + SyncIntervalMinutes *int - if o.EmailSettings.EnableSMTPAuth == nil { - o.EmailSettings.EnableSMTPAuth = new(bool) - if o.EmailSettings.ConnectionSecurity == CONN_SECURITY_NONE { - *o.EmailSettings.EnableSMTPAuth = false - } else { - *o.EmailSettings.EnableSMTPAuth = true - } - } + // Advanced + SkipCertificateVerification *bool + QueryTimeout *int + MaxPageSize *int - if o.EmailSettings.ConnectionSecurity == CONN_SECURITY_PLAIN { - o.EmailSettings.ConnectionSecurity = CONN_SECURITY_NONE - } + // Customization + LoginFieldName *string - if o.EmailSettings.SkipServerCertificateVerification == nil { - o.EmailSettings.SkipServerCertificateVerification = new(bool) - *o.EmailSettings.SkipServerCertificateVerification = false - } + LoginButtonColor *string + LoginButtonBorderColor *string + LoginButtonTextColor *string +} - if o.EmailSettings.EmailNotificationContentsType == nil { - o.EmailSettings.EmailNotificationContentsType = new(string) - *o.EmailSettings.EmailNotificationContentsType = EMAIL_NOTIFICATION_CONTENTS_FULL +func (s *LdapSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) } - if !IsSafeLink(o.SupportSettings.TermsOfServiceLink) { - *o.SupportSettings.TermsOfServiceLink = SUPPORT_SETTINGS_DEFAULT_TERMS_OF_SERVICE_LINK + // When unset should default to LDAP Enabled + if s.EnableSync == nil { + s.EnableSync = NewBool(*s.Enable) } - if o.SupportSettings.TermsOfServiceLink == nil { - o.SupportSettings.TermsOfServiceLink = new(string) - *o.SupportSettings.TermsOfServiceLink = SUPPORT_SETTINGS_DEFAULT_TERMS_OF_SERVICE_LINK + if s.LdapServer == nil { + s.LdapServer = NewString("") } - if !IsSafeLink(o.SupportSettings.PrivacyPolicyLink) { - *o.SupportSettings.PrivacyPolicyLink = "" + if s.LdapPort == nil { + s.LdapPort = NewInt(389) } - if o.SupportSettings.PrivacyPolicyLink == nil { - 
o.SupportSettings.PrivacyPolicyLink = new(string) - *o.SupportSettings.PrivacyPolicyLink = SUPPORT_SETTINGS_DEFAULT_PRIVACY_POLICY_LINK + if s.ConnectionSecurity == nil { + s.ConnectionSecurity = NewString("") } - if !IsSafeLink(o.SupportSettings.AboutLink) { - *o.SupportSettings.AboutLink = "" + if s.BaseDN == nil { + s.BaseDN = NewString("") } - if o.SupportSettings.AboutLink == nil { - o.SupportSettings.AboutLink = new(string) - *o.SupportSettings.AboutLink = SUPPORT_SETTINGS_DEFAULT_ABOUT_LINK + if s.BindUsername == nil { + s.BindUsername = NewString("") } - if !IsSafeLink(o.SupportSettings.HelpLink) { - *o.SupportSettings.HelpLink = "" + if s.BindPassword == nil { + s.BindPassword = NewString("") } - if o.SupportSettings.HelpLink == nil { - o.SupportSettings.HelpLink = new(string) - *o.SupportSettings.HelpLink = SUPPORT_SETTINGS_DEFAULT_HELP_LINK + if s.UserFilter == nil { + s.UserFilter = NewString("") } - if !IsSafeLink(o.SupportSettings.ReportAProblemLink) { - *o.SupportSettings.ReportAProblemLink = "" + if s.FirstNameAttribute == nil { + s.FirstNameAttribute = NewString(LDAP_SETTINGS_DEFAULT_FIRST_NAME_ATTRIBUTE) } - if o.SupportSettings.ReportAProblemLink == nil { - o.SupportSettings.ReportAProblemLink = new(string) - *o.SupportSettings.ReportAProblemLink = SUPPORT_SETTINGS_DEFAULT_REPORT_A_PROBLEM_LINK + if s.LastNameAttribute == nil { + s.LastNameAttribute = NewString(LDAP_SETTINGS_DEFAULT_LAST_NAME_ATTRIBUTE) } - if !IsSafeLink(o.SupportSettings.AdministratorsGuideLink) { - *o.SupportSettings.AdministratorsGuideLink = "" + if s.EmailAttribute == nil { + s.EmailAttribute = NewString(LDAP_SETTINGS_DEFAULT_EMAIL_ATTRIBUTE) } - if o.SupportSettings.AdministratorsGuideLink == nil { - o.SupportSettings.AdministratorsGuideLink = new(string) - *o.SupportSettings.AdministratorsGuideLink = SUPPORT_SETTINGS_DEFAULT_ADMINISTRATORS_GUIDE_LINK + if s.UsernameAttribute == nil { + s.UsernameAttribute = NewString(LDAP_SETTINGS_DEFAULT_USERNAME_ATTRIBUTE) } - if !IsSafeLink(o.SupportSettings.TroubleshootingForumLink) { - *o.SupportSettings.TroubleshootingForumLink = "" + if s.NicknameAttribute == nil { + s.NicknameAttribute = NewString(LDAP_SETTINGS_DEFAULT_NICKNAME_ATTRIBUTE) } - if o.SupportSettings.TroubleshootingForumLink == nil { - o.SupportSettings.TroubleshootingForumLink = new(string) - *o.SupportSettings.TroubleshootingForumLink = SUPPORT_SETTINGS_DEFAULT_TROUBLESHOOTING_FORUM_LINK + if s.IdAttribute == nil { + s.IdAttribute = NewString(LDAP_SETTINGS_DEFAULT_ID_ATTRIBUTE) } - if !IsSafeLink(o.SupportSettings.CommercialSupportLink) { - *o.SupportSettings.CommercialSupportLink = "" + if s.PositionAttribute == nil { + s.PositionAttribute = NewString(LDAP_SETTINGS_DEFAULT_POSITION_ATTRIBUTE) } - if o.SupportSettings.CommercialSupportLink == nil { - o.SupportSettings.CommercialSupportLink = new(string) - *o.SupportSettings.CommercialSupportLink = SUPPORT_SETTINGS_DEFAULT_COMMERCIAL_SUPPORT_LINK + if s.SyncIntervalMinutes == nil { + s.SyncIntervalMinutes = NewInt(60) } - if o.SupportSettings.SupportEmail == nil { - o.SupportSettings.SupportEmail = new(string) - *o.SupportSettings.SupportEmail = SUPPORT_SETTINGS_DEFAULT_SUPPORT_EMAIL + if s.SkipCertificateVerification == nil { + s.SkipCertificateVerification = NewBool(false) } - if o.AnnouncementSettings.EnableBanner == nil { - o.AnnouncementSettings.EnableBanner = new(bool) - *o.AnnouncementSettings.EnableBanner = false + if s.QueryTimeout == nil { + s.QueryTimeout = NewInt(60) } - if o.AnnouncementSettings.BannerText == nil { - 
o.AnnouncementSettings.BannerText = new(string) - *o.AnnouncementSettings.BannerText = "" + if s.MaxPageSize == nil { + s.MaxPageSize = NewInt(0) } - if o.AnnouncementSettings.BannerColor == nil { - o.AnnouncementSettings.BannerColor = new(string) - *o.AnnouncementSettings.BannerColor = ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_COLOR + if s.LoginFieldName == nil { + s.LoginFieldName = NewString(LDAP_SETTINGS_DEFAULT_LOGIN_FIELD_NAME) } - if o.AnnouncementSettings.BannerTextColor == nil { - o.AnnouncementSettings.BannerTextColor = new(string) - *o.AnnouncementSettings.BannerTextColor = ANNOUNCEMENT_SETTINGS_DEFAULT_BANNER_TEXT_COLOR + if s.LoginButtonColor == nil { + s.LoginButtonColor = NewString("#0000") } - if o.AnnouncementSettings.AllowBannerDismissal == nil { - o.AnnouncementSettings.AllowBannerDismissal = new(bool) - *o.AnnouncementSettings.AllowBannerDismissal = true + if s.LoginButtonBorderColor == nil { + s.LoginButtonBorderColor = NewString("#2389D7") } - if o.LdapSettings.Enable == nil { - o.LdapSettings.Enable = new(bool) - *o.LdapSettings.Enable = false + if s.LoginButtonTextColor == nil { + s.LoginButtonTextColor = NewString("#2389D7") } +} - if o.LdapSettings.LdapServer == nil { - o.LdapSettings.LdapServer = new(string) - *o.LdapSettings.LdapServer = "" - } +type ComplianceSettings struct { + Enable *bool + Directory *string + EnableDaily *bool +} - if o.LdapSettings.LdapPort == nil { - o.LdapSettings.LdapPort = new(int) - *o.LdapSettings.LdapPort = 389 +func (s *ComplianceSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) } - if o.LdapSettings.ConnectionSecurity == nil { - o.LdapSettings.ConnectionSecurity = new(string) - *o.LdapSettings.ConnectionSecurity = "" + if s.Directory == nil { + s.Directory = NewString("./data/") } - if o.LdapSettings.BaseDN == nil { - o.LdapSettings.BaseDN = new(string) - *o.LdapSettings.BaseDN = "" + if s.EnableDaily == nil { + s.EnableDaily = NewBool(false) } +} - if o.LdapSettings.BindUsername == nil { - o.LdapSettings.BindUsername = new(string) - *o.LdapSettings.BindUsername = "" - } +type LocalizationSettings struct { + DefaultServerLocale *string + DefaultClientLocale *string + AvailableLocales *string +} - if o.LdapSettings.BindPassword == nil { - o.LdapSettings.BindPassword = new(string) - *o.LdapSettings.BindPassword = "" +func (s *LocalizationSettings) SetDefaults() { + if s.DefaultServerLocale == nil { + s.DefaultServerLocale = NewString(DEFAULT_LOCALE) } - if o.LdapSettings.UserFilter == nil { - o.LdapSettings.UserFilter = new(string) - *o.LdapSettings.UserFilter = "" + if s.DefaultClientLocale == nil { + s.DefaultClientLocale = NewString(DEFAULT_LOCALE) } - if o.LdapSettings.FirstNameAttribute == nil { - o.LdapSettings.FirstNameAttribute = new(string) - *o.LdapSettings.FirstNameAttribute = LDAP_SETTINGS_DEFAULT_FIRST_NAME_ATTRIBUTE + if s.AvailableLocales == nil { + s.AvailableLocales = NewString("") } +} - if o.LdapSettings.LastNameAttribute == nil { - o.LdapSettings.LastNameAttribute = new(string) - *o.LdapSettings.LastNameAttribute = LDAP_SETTINGS_DEFAULT_LAST_NAME_ATTRIBUTE - } +type SamlSettings struct { + // Basic + Enable *bool + EnableSyncWithLdap *bool - if o.LdapSettings.EmailAttribute == nil { - o.LdapSettings.EmailAttribute = new(string) - *o.LdapSettings.EmailAttribute = LDAP_SETTINGS_DEFAULT_EMAIL_ATTRIBUTE - } + Verify *bool + Encrypt *bool - if o.LdapSettings.UsernameAttribute == nil { - o.LdapSettings.UsernameAttribute = new(string) - *o.LdapSettings.UsernameAttribute = 
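LdapSettings.SetDefaults above defaults EnableSync to the value of Enable, which means the order of the nil checks matters: Enable has to be filled in before EnableSync reads it. A small sketch of that dependency with an illustrative struct (ldapish is not a real type in this package):

package main

import "fmt"

func newBool(b bool) *bool { return &b } // mirrors NewBool from the diff

type ldapish struct {
	Enable     *bool
	EnableSync *bool
}

// setDefaults mirrors the ordering constraint visible in LdapSettings.SetDefaults:
// Enable is defaulted first, because an unset EnableSync inherits whatever
// value Enable ends up holding.
func (s *ldapish) setDefaults() {
	if s.Enable == nil {
		s.Enable = newBool(false)
	}
	if s.EnableSync == nil {
		s.EnableSync = newBool(*s.Enable)
	}
}

func main() {
	s := &ldapish{Enable: newBool(true)}
	s.setDefaults()
	fmt.Println(*s.EnableSync) // true: inherited from Enable
}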
LDAP_SETTINGS_DEFAULT_USERNAME_ATTRIBUTE - } + IdpUrl *string + IdpDescriptorUrl *string + AssertionConsumerServiceURL *string - if o.LdapSettings.NicknameAttribute == nil { - o.LdapSettings.NicknameAttribute = new(string) - *o.LdapSettings.NicknameAttribute = LDAP_SETTINGS_DEFAULT_NICKNAME_ATTRIBUTE - } + IdpCertificateFile *string + PublicCertificateFile *string + PrivateKeyFile *string - if o.LdapSettings.IdAttribute == nil { - o.LdapSettings.IdAttribute = new(string) - *o.LdapSettings.IdAttribute = LDAP_SETTINGS_DEFAULT_ID_ATTRIBUTE - } + // User Mapping + FirstNameAttribute *string + LastNameAttribute *string + EmailAttribute *string + UsernameAttribute *string + NicknameAttribute *string + LocaleAttribute *string + PositionAttribute *string - if o.LdapSettings.PositionAttribute == nil { - o.LdapSettings.PositionAttribute = new(string) - *o.LdapSettings.PositionAttribute = LDAP_SETTINGS_DEFAULT_POSITION_ATTRIBUTE - } + LoginButtonText *string - if o.LdapSettings.SyncIntervalMinutes == nil { - o.LdapSettings.SyncIntervalMinutes = new(int) - *o.LdapSettings.SyncIntervalMinutes = 60 - } + LoginButtonColor *string + LoginButtonBorderColor *string + LoginButtonTextColor *string +} - if o.LdapSettings.SkipCertificateVerification == nil { - o.LdapSettings.SkipCertificateVerification = new(bool) - *o.LdapSettings.SkipCertificateVerification = false +func (s *SamlSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) } - if o.LdapSettings.QueryTimeout == nil { - o.LdapSettings.QueryTimeout = new(int) - *o.LdapSettings.QueryTimeout = 60 + if s.EnableSyncWithLdap == nil { + s.EnableSyncWithLdap = NewBool(false) } - if o.LdapSettings.MaxPageSize == nil { - o.LdapSettings.MaxPageSize = new(int) - *o.LdapSettings.MaxPageSize = 0 + if s.Verify == nil { + s.Verify = NewBool(true) } - if o.LdapSettings.LoginFieldName == nil { - o.LdapSettings.LoginFieldName = new(string) - *o.LdapSettings.LoginFieldName = LDAP_SETTINGS_DEFAULT_LOGIN_FIELD_NAME + if s.Encrypt == nil { + s.Encrypt = NewBool(true) } - if o.ServiceSettings.SessionLengthWebInDays == nil { - o.ServiceSettings.SessionLengthWebInDays = new(int) - *o.ServiceSettings.SessionLengthWebInDays = 30 + if s.IdpUrl == nil { + s.IdpUrl = NewString("") } - if o.ServiceSettings.SessionLengthMobileInDays == nil { - o.ServiceSettings.SessionLengthMobileInDays = new(int) - *o.ServiceSettings.SessionLengthMobileInDays = 30 + if s.IdpDescriptorUrl == nil { + s.IdpDescriptorUrl = NewString("") } - if o.ServiceSettings.SessionLengthSSOInDays == nil { - o.ServiceSettings.SessionLengthSSOInDays = new(int) - *o.ServiceSettings.SessionLengthSSOInDays = 30 + if s.IdpCertificateFile == nil { + s.IdpCertificateFile = NewString("") } - if o.ServiceSettings.SessionCacheInMinutes == nil { - o.ServiceSettings.SessionCacheInMinutes = new(int) - *o.ServiceSettings.SessionCacheInMinutes = 10 + if s.PublicCertificateFile == nil { + s.PublicCertificateFile = NewString("") } - if o.ServiceSettings.EnableCommands == nil { - o.ServiceSettings.EnableCommands = new(bool) - *o.ServiceSettings.EnableCommands = false + if s.PrivateKeyFile == nil { + s.PrivateKeyFile = NewString("") } - if o.ServiceSettings.EnableOnlyAdminIntegrations == nil { - o.ServiceSettings.EnableOnlyAdminIntegrations = new(bool) - *o.ServiceSettings.EnableOnlyAdminIntegrations = true + if s.AssertionConsumerServiceURL == nil { + s.AssertionConsumerServiceURL = NewString("") } - if o.ServiceSettings.WebsocketPort == nil { - o.ServiceSettings.WebsocketPort = new(int) - 
*o.ServiceSettings.WebsocketPort = 80 + if s.LoginButtonText == nil || *s.LoginButtonText == "" { + s.LoginButtonText = NewString(USER_AUTH_SERVICE_SAML_TEXT) } - if o.ServiceSettings.WebsocketSecurePort == nil { - o.ServiceSettings.WebsocketSecurePort = new(int) - *o.ServiceSettings.WebsocketSecurePort = 443 + if s.FirstNameAttribute == nil { + s.FirstNameAttribute = NewString(SAML_SETTINGS_DEFAULT_FIRST_NAME_ATTRIBUTE) } - if o.ServiceSettings.AllowCorsFrom == nil { - o.ServiceSettings.AllowCorsFrom = new(string) - *o.ServiceSettings.AllowCorsFrom = SERVICE_SETTINGS_DEFAULT_ALLOW_CORS_FROM + if s.LastNameAttribute == nil { + s.LastNameAttribute = NewString(SAML_SETTINGS_DEFAULT_LAST_NAME_ATTRIBUTE) } - if o.ServiceSettings.WebserverMode == nil { - o.ServiceSettings.WebserverMode = new(string) - *o.ServiceSettings.WebserverMode = "gzip" - } else if *o.ServiceSettings.WebserverMode == "regular" { - *o.ServiceSettings.WebserverMode = "gzip" + if s.EmailAttribute == nil { + s.EmailAttribute = NewString(SAML_SETTINGS_DEFAULT_EMAIL_ATTRIBUTE) } - if o.ServiceSettings.EnableCustomEmoji == nil { - o.ServiceSettings.EnableCustomEmoji = new(bool) - *o.ServiceSettings.EnableCustomEmoji = false + if s.UsernameAttribute == nil { + s.UsernameAttribute = NewString(SAML_SETTINGS_DEFAULT_USERNAME_ATTRIBUTE) } - if o.ServiceSettings.EnableEmojiPicker == nil { - o.ServiceSettings.EnableEmojiPicker = new(bool) - *o.ServiceSettings.EnableEmojiPicker = true + if s.NicknameAttribute == nil { + s.NicknameAttribute = NewString(SAML_SETTINGS_DEFAULT_NICKNAME_ATTRIBUTE) } - if o.ServiceSettings.RestrictCustomEmojiCreation == nil { - o.ServiceSettings.RestrictCustomEmojiCreation = new(string) - *o.ServiceSettings.RestrictCustomEmojiCreation = RESTRICT_EMOJI_CREATION_ALL + if s.PositionAttribute == nil { + s.PositionAttribute = NewString(SAML_SETTINGS_DEFAULT_POSITION_ATTRIBUTE) } - if o.ServiceSettings.RestrictPostDelete == nil { - o.ServiceSettings.RestrictPostDelete = new(string) - *o.ServiceSettings.RestrictPostDelete = PERMISSIONS_DELETE_POST_ALL + if s.LocaleAttribute == nil { + s.LocaleAttribute = NewString(SAML_SETTINGS_DEFAULT_LOCALE_ATTRIBUTE) } - if o.ServiceSettings.AllowEditPost == nil { - o.ServiceSettings.AllowEditPost = new(string) - *o.ServiceSettings.AllowEditPost = ALLOW_EDIT_POST_ALWAYS + if s.LoginButtonColor == nil { + s.LoginButtonColor = NewString("#34a28b") } - if o.ServiceSettings.PostEditTimeLimit == nil { - o.ServiceSettings.PostEditTimeLimit = new(int) - *o.ServiceSettings.PostEditTimeLimit = 300 + if s.LoginButtonBorderColor == nil { + s.LoginButtonBorderColor = NewString("#2389D7") } - if o.ClusterSettings.Enable == nil { - o.ClusterSettings.Enable = new(bool) - *o.ClusterSettings.Enable = false + if s.LoginButtonTextColor == nil { + s.LoginButtonTextColor = NewString("#ffffff") } +} - if o.ClusterSettings.ClusterName == nil { - o.ClusterSettings.ClusterName = new(string) - *o.ClusterSettings.ClusterName = "" - } +type NativeAppSettings struct { + AppDownloadLink *string + AndroidAppDownloadLink *string + IosAppDownloadLink *string +} - if o.ClusterSettings.OverrideHostname == nil { - o.ClusterSettings.OverrideHostname = new(string) - *o.ClusterSettings.OverrideHostname = "" +func (s *NativeAppSettings) SetDefaults() { + if s.AppDownloadLink == nil { + s.AppDownloadLink = NewString(NATIVEAPP_SETTINGS_DEFAULT_APP_DOWNLOAD_LINK) } - if o.ClusterSettings.UseIpAddress == nil { - o.ClusterSettings.UseIpAddress = new(bool) - *o.ClusterSettings.UseIpAddress = true + if 
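SamlSettings.LoginButtonText above is the one field that is re-defaulted not only when nil but also when set to the empty string. A sketch of that nil-or-empty fallback in isolation (function and helper names are illustrative):

package main

import "fmt"

func newString(s string) *string { return &s } // mirrors NewString from the diff

// defaultButtonText falls back to the default both when the pointer is nil
// and when the configured value is empty, matching the LoginButtonText check above.
func defaultButtonText(current *string, fallback string) *string {
	if current == nil || *current == "" {
		return newString(fallback)
	}
	return current
}

func main() {
	fmt.Println(*defaultButtonText(nil, "SAML"))              // SAML
	fmt.Println(*defaultButtonText(newString(""), "SAML"))    // SAML
	fmt.Println(*defaultButtonText(newString("SSO"), "SAML")) // SSO
}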
s.AndroidAppDownloadLink == nil { + s.AndroidAppDownloadLink = NewString(NATIVEAPP_SETTINGS_DEFAULT_ANDROID_APP_DOWNLOAD_LINK) } - if o.ClusterSettings.UseExperimentalGossip == nil { - o.ClusterSettings.UseExperimentalGossip = new(bool) - *o.ClusterSettings.UseExperimentalGossip = false + if s.IosAppDownloadLink == nil { + s.IosAppDownloadLink = NewString(NATIVEAPP_SETTINGS_DEFAULT_IOS_APP_DOWNLOAD_LINK) } +} - if o.ClusterSettings.ReadOnlyConfig == nil { - o.ClusterSettings.ReadOnlyConfig = new(bool) - *o.ClusterSettings.ReadOnlyConfig = true - } +type WebrtcSettings struct { + Enable *bool + GatewayWebsocketUrl *string + GatewayAdminUrl *string + GatewayAdminSecret *string + StunURI *string + TurnURI *string + TurnUsername *string + TurnSharedKey *string +} - if o.ClusterSettings.GossipPort == nil { - o.ClusterSettings.GossipPort = new(int) - *o.ClusterSettings.GossipPort = 8074 +func (s *WebrtcSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(false) } - if o.ClusterSettings.StreamingPort == nil { - o.ClusterSettings.StreamingPort = new(int) - *o.ClusterSettings.StreamingPort = 8075 + if s.GatewayWebsocketUrl == nil { + s.GatewayWebsocketUrl = NewString("") } - if o.MetricsSettings.ListenAddress == nil { - o.MetricsSettings.ListenAddress = new(string) - *o.MetricsSettings.ListenAddress = ":8067" + if s.GatewayAdminUrl == nil { + s.GatewayAdminUrl = NewString("") } - if o.MetricsSettings.Enable == nil { - o.MetricsSettings.Enable = new(bool) - *o.MetricsSettings.Enable = false + if s.GatewayAdminSecret == nil { + s.GatewayAdminSecret = NewString("") } - if o.AnalyticsSettings.MaxUsersForStatistics == nil { - o.AnalyticsSettings.MaxUsersForStatistics = new(int) - *o.AnalyticsSettings.MaxUsersForStatistics = ANALYTICS_SETTINGS_DEFAULT_MAX_USERS_FOR_STATISTICS + if s.StunURI == nil { + s.StunURI = NewString(WEBRTC_SETTINGS_DEFAULT_STUN_URI) } - if o.ComplianceSettings.Enable == nil { - o.ComplianceSettings.Enable = new(bool) - *o.ComplianceSettings.Enable = false + if s.TurnURI == nil { + s.TurnURI = NewString(WEBRTC_SETTINGS_DEFAULT_TURN_URI) } - if o.ComplianceSettings.Directory == nil { - o.ComplianceSettings.Directory = new(string) - *o.ComplianceSettings.Directory = "./data/" + if s.TurnUsername == nil { + s.TurnUsername = NewString("") } - if o.ComplianceSettings.EnableDaily == nil { - o.ComplianceSettings.EnableDaily = new(bool) - *o.ComplianceSettings.EnableDaily = false + if s.TurnSharedKey == nil { + s.TurnSharedKey = NewString("") } +} + +type ElasticsearchSettings struct { + ConnectionUrl *string + Username *string + Password *string + EnableIndexing *bool + EnableSearching *bool + Sniff *bool + PostIndexReplicas *int + PostIndexShards *int + AggregatePostsAfterDays *int + PostsAggregatorJobStartTime *string + IndexPrefix *string + LiveIndexingBatchSize *int + BulkIndexingTimeWindowSeconds *int + RequestTimeoutSeconds *int +} - if o.LocalizationSettings.DefaultServerLocale == nil { - o.LocalizationSettings.DefaultServerLocale = new(string) - *o.LocalizationSettings.DefaultServerLocale = DEFAULT_LOCALE +func (s *ElasticsearchSettings) SetDefaults() { + if s.ConnectionUrl == nil { + s.ConnectionUrl = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_CONNECTION_URL) } - if o.LocalizationSettings.DefaultClientLocale == nil { - o.LocalizationSettings.DefaultClientLocale = new(string) - *o.LocalizationSettings.DefaultClientLocale = DEFAULT_LOCALE + if s.Username == nil { + s.Username = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_USERNAME) } - if 
o.LocalizationSettings.AvailableLocales == nil { - o.LocalizationSettings.AvailableLocales = new(string) - *o.LocalizationSettings.AvailableLocales = "" + if s.Password == nil { + s.Password = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_PASSWORD) } - if o.LogSettings.EnableDiagnostics == nil { - o.LogSettings.EnableDiagnostics = new(bool) - *o.LogSettings.EnableDiagnostics = true + if s.EnableIndexing == nil { + s.EnableIndexing = NewBool(false) } - if o.SamlSettings.Enable == nil { - o.SamlSettings.Enable = new(bool) - *o.SamlSettings.Enable = false + if s.EnableSearching == nil { + s.EnableSearching = NewBool(false) } - if o.SamlSettings.Verify == nil { - o.SamlSettings.Verify = new(bool) - *o.SamlSettings.Verify = true + if s.Sniff == nil { + s.Sniff = NewBool(true) } - if o.SamlSettings.Encrypt == nil { - o.SamlSettings.Encrypt = new(bool) - *o.SamlSettings.Encrypt = true + if s.PostIndexReplicas == nil { + s.PostIndexReplicas = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_REPLICAS) } - if o.SamlSettings.IdpUrl == nil { - o.SamlSettings.IdpUrl = new(string) - *o.SamlSettings.IdpUrl = "" + if s.PostIndexShards == nil { + s.PostIndexShards = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_SHARDS) } - if o.SamlSettings.IdpDescriptorUrl == nil { - o.SamlSettings.IdpDescriptorUrl = new(string) - *o.SamlSettings.IdpDescriptorUrl = "" + if s.AggregatePostsAfterDays == nil { + s.AggregatePostsAfterDays = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_AGGREGATE_POSTS_AFTER_DAYS) } - if o.SamlSettings.IdpCertificateFile == nil { - o.SamlSettings.IdpCertificateFile = new(string) - *o.SamlSettings.IdpCertificateFile = "" + if s.PostsAggregatorJobStartTime == nil { + s.PostsAggregatorJobStartTime = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_POSTS_AGGREGATOR_JOB_START_TIME) } - if o.SamlSettings.PublicCertificateFile == nil { - o.SamlSettings.PublicCertificateFile = new(string) - *o.SamlSettings.PublicCertificateFile = "" + if s.IndexPrefix == nil { + s.IndexPrefix = NewString(ELASTICSEARCH_SETTINGS_DEFAULT_INDEX_PREFIX) } - if o.SamlSettings.PrivateKeyFile == nil { - o.SamlSettings.PrivateKeyFile = new(string) - *o.SamlSettings.PrivateKeyFile = "" + if s.LiveIndexingBatchSize == nil { + s.LiveIndexingBatchSize = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_LIVE_INDEXING_BATCH_SIZE) } - if o.SamlSettings.AssertionConsumerServiceURL == nil { - o.SamlSettings.AssertionConsumerServiceURL = new(string) - *o.SamlSettings.AssertionConsumerServiceURL = "" + if s.BulkIndexingTimeWindowSeconds == nil { + s.BulkIndexingTimeWindowSeconds = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_BULK_INDEXING_TIME_WINDOW_SECONDS) } - if o.SamlSettings.LoginButtonText == nil || *o.SamlSettings.LoginButtonText == "" { - o.SamlSettings.LoginButtonText = new(string) - *o.SamlSettings.LoginButtonText = USER_AUTH_SERVICE_SAML_TEXT + if s.RequestTimeoutSeconds == nil { + s.RequestTimeoutSeconds = NewInt(ELASTICSEARCH_SETTINGS_DEFAULT_REQUEST_TIMEOUT_SECONDS) } +} + +type DataRetentionSettings struct { + EnableMessageDeletion *bool + EnableFileDeletion *bool + MessageRetentionDays *int + FileRetentionDays *int + DeletionJobStartTime *string +} - if o.SamlSettings.FirstNameAttribute == nil { - o.SamlSettings.FirstNameAttribute = new(string) - *o.SamlSettings.FirstNameAttribute = SAML_SETTINGS_DEFAULT_FIRST_NAME_ATTRIBUTE +func (s *DataRetentionSettings) SetDefaults() { + if s.EnableMessageDeletion == nil { + s.EnableMessageDeletion = NewBool(false) } - if o.SamlSettings.LastNameAttribute == nil { - o.SamlSettings.LastNameAttribute = new(string) - 
*o.SamlSettings.LastNameAttribute = SAML_SETTINGS_DEFAULT_LAST_NAME_ATTRIBUTE + if s.EnableFileDeletion == nil { + s.EnableFileDeletion = NewBool(false) } - if o.SamlSettings.EmailAttribute == nil { - o.SamlSettings.EmailAttribute = new(string) - *o.SamlSettings.EmailAttribute = SAML_SETTINGS_DEFAULT_EMAIL_ATTRIBUTE + if s.MessageRetentionDays == nil { + s.MessageRetentionDays = NewInt(DATA_RETENTION_SETTINGS_DEFAULT_MESSAGE_RETENTION_DAYS) } - if o.SamlSettings.UsernameAttribute == nil { - o.SamlSettings.UsernameAttribute = new(string) - *o.SamlSettings.UsernameAttribute = SAML_SETTINGS_DEFAULT_USERNAME_ATTRIBUTE + if s.FileRetentionDays == nil { + s.FileRetentionDays = NewInt(DATA_RETENTION_SETTINGS_DEFAULT_FILE_RETENTION_DAYS) } - if o.SamlSettings.NicknameAttribute == nil { - o.SamlSettings.NicknameAttribute = new(string) - *o.SamlSettings.NicknameAttribute = SAML_SETTINGS_DEFAULT_NICKNAME_ATTRIBUTE + if s.DeletionJobStartTime == nil { + s.DeletionJobStartTime = NewString(DATA_RETENTION_SETTINGS_DEFAULT_DELETION_JOB_START_TIME) } +} + +type JobSettings struct { + RunJobs *bool + RunScheduler *bool +} - if o.SamlSettings.PositionAttribute == nil { - o.SamlSettings.PositionAttribute = new(string) - *o.SamlSettings.PositionAttribute = SAML_SETTINGS_DEFAULT_POSITION_ATTRIBUTE +func (s *JobSettings) SetDefaults() { + if s.RunJobs == nil { + s.RunJobs = NewBool(true) } - if o.SamlSettings.LocaleAttribute == nil { - o.SamlSettings.LocaleAttribute = new(string) - *o.SamlSettings.LocaleAttribute = SAML_SETTINGS_DEFAULT_LOCALE_ATTRIBUTE + if s.RunScheduler == nil { + s.RunScheduler = NewBool(true) } +} - if o.TeamSettings.TeammateNameDisplay == nil { - o.TeamSettings.TeammateNameDisplay = new(string) - *o.TeamSettings.TeammateNameDisplay = SHOW_USERNAME +type PluginState struct { + Enable bool +} - if *o.SamlSettings.Enable || *o.LdapSettings.Enable { - *o.TeamSettings.TeammateNameDisplay = SHOW_FULLNAME - } +type PluginSettings struct { + Enable *bool + EnableUploads *bool + Directory *string + ClientDirectory *string + Plugins map[string]interface{} + PluginStates map[string]*PluginState +} + +func (s *PluginSettings) SetDefaults() { + if s.Enable == nil { + s.Enable = NewBool(true) } - if o.NativeAppSettings.AppDownloadLink == nil { - o.NativeAppSettings.AppDownloadLink = new(string) - *o.NativeAppSettings.AppDownloadLink = NATIVEAPP_SETTINGS_DEFAULT_APP_DOWNLOAD_LINK + if s.EnableUploads == nil { + s.EnableUploads = NewBool(false) } - if o.NativeAppSettings.AndroidAppDownloadLink == nil { - o.NativeAppSettings.AndroidAppDownloadLink = new(string) - *o.NativeAppSettings.AndroidAppDownloadLink = NATIVEAPP_SETTINGS_DEFAULT_ANDROID_APP_DOWNLOAD_LINK + if s.Directory == nil { + s.Directory = NewString(PLUGIN_SETTINGS_DEFAULT_DIRECTORY) } - if o.NativeAppSettings.IosAppDownloadLink == nil { - o.NativeAppSettings.IosAppDownloadLink = new(string) - *o.NativeAppSettings.IosAppDownloadLink = NATIVEAPP_SETTINGS_DEFAULT_IOS_APP_DOWNLOAD_LINK + if *s.Directory == "" { + *s.Directory = PLUGIN_SETTINGS_DEFAULT_DIRECTORY } - if o.RateLimitSettings.Enable == nil { - o.RateLimitSettings.Enable = new(bool) - *o.RateLimitSettings.Enable = false + if s.ClientDirectory == nil { + s.ClientDirectory = NewString(PLUGIN_SETTINGS_DEFAULT_CLIENT_DIRECTORY) } - if o.ServiceSettings.GoroutineHealthThreshold == nil { - o.ServiceSettings.GoroutineHealthThreshold = new(int) - *o.ServiceSettings.GoroutineHealthThreshold = -1 + if *s.ClientDirectory == "" { + *s.ClientDirectory = PLUGIN_SETTINGS_DEFAULT_CLIENT_DIRECTORY } - 
if o.RateLimitSettings.MaxBurst == nil { - o.RateLimitSettings.MaxBurst = new(int) - *o.RateLimitSettings.MaxBurst = 100 + if s.Plugins == nil { + s.Plugins = make(map[string]interface{}) } - if o.ServiceSettings.ConnectionSecurity == nil { - o.ServiceSettings.ConnectionSecurity = new(string) - *o.ServiceSettings.ConnectionSecurity = "" + if s.PluginStates == nil { + s.PluginStates = make(map[string]*PluginState) } +} + +type MessageExportSettings struct { + EnableExport *bool + DailyRunTime *string + ExportFromTimestamp *int64 + BatchSize *int +} - if o.ServiceSettings.TLSKeyFile == nil { - o.ServiceSettings.TLSKeyFile = new(string) - *o.ServiceSettings.TLSKeyFile = SERVICE_SETTINGS_DEFAULT_TLS_KEY_FILE +func (s *MessageExportSettings) SetDefaults() { + if s.EnableExport == nil { + s.EnableExport = NewBool(false) } - if o.ServiceSettings.TLSCertFile == nil { - o.ServiceSettings.TLSCertFile = new(string) - *o.ServiceSettings.TLSCertFile = SERVICE_SETTINGS_DEFAULT_TLS_CERT_FILE + if s.DailyRunTime == nil { + s.DailyRunTime = NewString("01:00") } - if o.ServiceSettings.UseLetsEncrypt == nil { - o.ServiceSettings.UseLetsEncrypt = new(bool) - *o.ServiceSettings.UseLetsEncrypt = false + if s.ExportFromTimestamp == nil { + s.ExportFromTimestamp = NewInt64(0) } - if o.ServiceSettings.LetsEncryptCertificateCacheFile == nil { - o.ServiceSettings.LetsEncryptCertificateCacheFile = new(string) - *o.ServiceSettings.LetsEncryptCertificateCacheFile = "./config/letsencrypt.cache" + if s.EnableExport != nil && *s.EnableExport && *s.ExportFromTimestamp == int64(0) { + // when the feature is enabled via the System Console, use the current timestamp as the start time for future exports + s.ExportFromTimestamp = NewInt64(GetMillis()) + } else if s.EnableExport != nil && !*s.EnableExport { + // when the feature is disabled, reset the timestamp so that the timestamp will be set if the feature is re-enabled + s.ExportFromTimestamp = NewInt64(0) } - if o.ServiceSettings.ReadTimeout == nil { - o.ServiceSettings.ReadTimeout = new(int) - *o.ServiceSettings.ReadTimeout = SERVICE_SETTINGS_DEFAULT_READ_TIMEOUT + if s.BatchSize == nil { + s.BatchSize = NewInt(10000) } +} + +type ConfigFunc func() *Config + +type Config struct { + ServiceSettings ServiceSettings + TeamSettings TeamSettings + ClientRequirements ClientRequirements + SqlSettings SqlSettings + LogSettings LogSettings + PasswordSettings PasswordSettings + FileSettings FileSettings + EmailSettings EmailSettings + RateLimitSettings RateLimitSettings + PrivacySettings PrivacySettings + SupportSettings SupportSettings + AnnouncementSettings AnnouncementSettings + ThemeSettings ThemeSettings + GitLabSettings SSOSettings + GoogleSettings SSOSettings + Office365Settings SSOSettings + LdapSettings LdapSettings + ComplianceSettings ComplianceSettings + LocalizationSettings LocalizationSettings + SamlSettings SamlSettings + NativeAppSettings NativeAppSettings + ClusterSettings ClusterSettings + MetricsSettings MetricsSettings + AnalyticsSettings AnalyticsSettings + WebrtcSettings WebrtcSettings + ElasticsearchSettings ElasticsearchSettings + DataRetentionSettings DataRetentionSettings + MessageExportSettings MessageExportSettings + JobSettings JobSettings + PluginSettings PluginSettings +} - if o.ServiceSettings.WriteTimeout == nil { - o.ServiceSettings.WriteTimeout = new(int) - *o.ServiceSettings.WriteTimeout = SERVICE_SETTINGS_DEFAULT_WRITE_TIMEOUT +func (o *Config) Clone() *Config { + var ret Config + if err := json.Unmarshal([]byte(o.ToJson()), &ret); err != nil { + 
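MessageExportSettings.SetDefaults above carries a bit of stateful logic: enabling export with no recorded start time stamps the current time so only future posts are exported, and disabling it clears the stamp so a later re-enable restamps. A standalone sketch of that toggle with the clock injected for clarity (the real code calls GetMillis directly; all names here are illustrative):

package main

import (
	"fmt"
	"time"
)

type exportSettings struct {
	Enable        bool
	FromTimestamp int64
}

// applyDefaults mirrors the ExportFromTimestamp handling above: enabling the
// feature with no recorded start time stamps "now"; disabling it clears the
// stamp so a later re-enable picks a fresh start time.
func (s *exportSettings) applyDefaults(now func() int64) {
	if s.Enable && s.FromTimestamp == 0 {
		s.FromTimestamp = now()
	} else if !s.Enable {
		s.FromTimestamp = 0
	}
}

func main() {
	millis := func() int64 { return time.Now().UnixNano() / int64(time.Millisecond) }

	s := &exportSettings{Enable: true}
	s.applyDefaults(millis)
	fmt.Println(s.FromTimestamp > 0) // true

	s.Enable = false
	s.applyDefaults(millis)
	fmt.Println(s.FromTimestamp) // 0
}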
panic(err) } + return &ret +} - if o.ServiceSettings.Forward80To443 == nil { - o.ServiceSettings.Forward80To443 = new(bool) - *o.ServiceSettings.Forward80To443 = false +func (o *Config) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) } +} - if o.MetricsSettings.BlockProfileRate == nil { - o.MetricsSettings.BlockProfileRate = new(int) - *o.MetricsSettings.BlockProfileRate = 0 +func (o *Config) GetSSOService(service string) *SSOSettings { + switch service { + case SERVICE_GITLAB: + return &o.GitLabSettings + case SERVICE_GOOGLE: + return &o.GoogleSettings + case SERVICE_OFFICE365: + return &o.Office365Settings } - if o.ServiceSettings.TimeBetweenUserTypingUpdatesMilliseconds == nil { - o.ServiceSettings.TimeBetweenUserTypingUpdatesMilliseconds = new(int64) - *o.ServiceSettings.TimeBetweenUserTypingUpdatesMilliseconds = 5000 + return nil +} + +func ConfigFromJson(data io.Reader) *Config { + decoder := json.NewDecoder(data) + var o Config + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil } +} + +func (o *Config) SetDefaults() { + o.LdapSettings.SetDefaults() + o.SamlSettings.SetDefaults() - if o.ServiceSettings.EnablePostSearch == nil { - o.ServiceSettings.EnablePostSearch = new(bool) - *o.ServiceSettings.EnablePostSearch = true + if o.TeamSettings.TeammateNameDisplay == nil { + o.TeamSettings.TeammateNameDisplay = NewString(SHOW_USERNAME) + + if *o.SamlSettings.Enable || *o.LdapSettings.Enable { + *o.TeamSettings.TeammateNameDisplay = SHOW_FULLNAME + } } - if o.ServiceSettings.EnableUserTypingMessages == nil { - o.ServiceSettings.EnableUserTypingMessages = new(bool) - *o.ServiceSettings.EnableUserTypingMessages = true + o.SqlSettings.SetDefaults() + o.FileSettings.SetDefaults() + o.EmailSettings.SetDefaults() + o.ServiceSettings.SetDefaults() + o.PasswordSettings.SetDefaults() + o.TeamSettings.SetDefaults() + o.MetricsSettings.SetDefaults() + o.SupportSettings.SetDefaults() + o.AnnouncementSettings.SetDefaults() + o.ThemeSettings.SetDefaults() + o.ClusterSettings.SetDefaults() + o.PluginSettings.SetDefaults() + o.AnalyticsSettings.SetDefaults() + o.ComplianceSettings.SetDefaults() + o.LocalizationSettings.SetDefaults() + o.ElasticsearchSettings.SetDefaults() + o.NativeAppSettings.SetDefaults() + o.DataRetentionSettings.SetDefaults() + o.RateLimitSettings.SetDefaults() + o.LogSettings.SetDefaults() + o.JobSettings.SetDefaults() + o.WebrtcSettings.SetDefaults() + o.MessageExportSettings.SetDefaults() +} + +func (o *Config) IsValid() *AppError { + if len(*o.ServiceSettings.SiteURL) == 0 && *o.EmailSettings.EnableEmailBatching { + return NewAppError("Config.IsValid", "model.config.is_valid.site_url_email_batching.app_error", nil, "", http.StatusBadRequest) } - if o.ServiceSettings.EnableChannelViewedMessages == nil { - o.ServiceSettings.EnableChannelViewedMessages = new(bool) - *o.ServiceSettings.EnableChannelViewedMessages = true + if *o.ClusterSettings.Enable && *o.EmailSettings.EnableEmailBatching { + return NewAppError("Config.IsValid", "model.config.is_valid.cluster_email_batching.app_error", nil, "", http.StatusBadRequest) } - if o.ServiceSettings.EnableUserStatuses == nil { - o.ServiceSettings.EnableUserStatuses = new(bool) - *o.ServiceSettings.EnableUserStatuses = true + if err := o.TeamSettings.isValid(); err != nil { + return err } - if o.ServiceSettings.ClusterLogTimeoutMilliseconds == nil { - o.ServiceSettings.ClusterLogTimeoutMilliseconds = new(int) - 
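The new Config.Clone above deep-copies by marshalling to JSON and decoding into a fresh value. The same round-trip technique in self-contained form, with an illustrative settings type:

package main

import (
	"encoding/json"
	"fmt"
)

type settings struct {
	SiteName string
	MaxUsers *int
}

// clone deep-copies via a JSON round-trip, the technique Config.Clone uses:
// cheap to write and safe for plain data structs, at the cost of an
// encode/decode pass and the loss of any non-JSON-serializable state.
func (s *settings) clone() *settings {
	b, err := json.Marshal(s)
	if err != nil {
		panic(err)
	}
	var out settings
	if err := json.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	return &out
}

func main() {
	n := 100
	a := &settings{SiteName: "example", MaxUsers: &n}
	b := a.clone()
	*b.MaxUsers = 5
	fmt.Println(*a.MaxUsers, *b.MaxUsers) // 100 5 — the pointer is not shared
}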
*o.ServiceSettings.ClusterLogTimeoutMilliseconds = 2000 + if err := o.SqlSettings.isValid(); err != nil { + return err } - if o.ElasticsearchSettings.ConnectionUrl == nil { - o.ElasticsearchSettings.ConnectionUrl = new(string) - *o.ElasticsearchSettings.ConnectionUrl = ELASTICSEARCH_SETTINGS_DEFAULT_CONNECTION_URL + if err := o.FileSettings.isValid(); err != nil { + return err } - if o.ElasticsearchSettings.Username == nil { - o.ElasticsearchSettings.Username = new(string) - *o.ElasticsearchSettings.Username = ELASTICSEARCH_SETTINGS_DEFAULT_USERNAME + if err := o.EmailSettings.isValid(); err != nil { + return err } - if o.ElasticsearchSettings.Password == nil { - o.ElasticsearchSettings.Password = new(string) - *o.ElasticsearchSettings.Password = ELASTICSEARCH_SETTINGS_DEFAULT_PASSWORD + if err := o.LdapSettings.isValid(); err != nil { + return err } - if o.ElasticsearchSettings.EnableIndexing == nil { - o.ElasticsearchSettings.EnableIndexing = new(bool) - *o.ElasticsearchSettings.EnableIndexing = false + if err := o.SamlSettings.isValid(); err != nil { + return err } - if o.ElasticsearchSettings.EnableSearching == nil { - o.ElasticsearchSettings.EnableSearching = new(bool) - *o.ElasticsearchSettings.EnableSearching = false + if *o.PasswordSettings.MinimumLength < PASSWORD_MINIMUM_LENGTH || *o.PasswordSettings.MinimumLength > PASSWORD_MAXIMUM_LENGTH { + return NewAppError("Config.IsValid", "model.config.is_valid.password_length.app_error", map[string]interface{}{"MinLength": PASSWORD_MINIMUM_LENGTH, "MaxLength": PASSWORD_MAXIMUM_LENGTH}, "", http.StatusBadRequest) } - if o.ElasticsearchSettings.Sniff == nil { - o.ElasticsearchSettings.Sniff = new(bool) - *o.ElasticsearchSettings.Sniff = true + if err := o.RateLimitSettings.isValid(); err != nil { + return err } - if o.ElasticsearchSettings.PostIndexReplicas == nil { - o.ElasticsearchSettings.PostIndexReplicas = new(int) - *o.ElasticsearchSettings.PostIndexReplicas = ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_REPLICAS + if err := o.WebrtcSettings.isValid(); err != nil { + return err } - if o.ElasticsearchSettings.PostIndexShards == nil { - o.ElasticsearchSettings.PostIndexShards = new(int) - *o.ElasticsearchSettings.PostIndexShards = ELASTICSEARCH_SETTINGS_DEFAULT_POST_INDEX_SHARDS + if err := o.ServiceSettings.isValid(); err != nil { + return err } - if o.DataRetentionSettings.Enable == nil { - o.DataRetentionSettings.Enable = new(bool) - *o.DataRetentionSettings.Enable = false + if err := o.ElasticsearchSettings.isValid(); err != nil { + return err } - if o.JobSettings.RunJobs == nil { - o.JobSettings.RunJobs = new(bool) - *o.JobSettings.RunJobs = true + if err := o.DataRetentionSettings.isValid(); err != nil { + return err } - if o.JobSettings.RunScheduler == nil { - o.JobSettings.RunScheduler = new(bool) - *o.JobSettings.RunScheduler = true + if err := o.LocalizationSettings.isValid(); err != nil { + return err } - if o.PluginSettings.Plugins == nil { - o.PluginSettings.Plugins = make(map[string]interface{}) + if err := o.MessageExportSettings.isValid(o.FileSettings); err != nil { + return err } - o.defaultWebrtcSettings() + return nil } -func (o *Config) IsValid() *AppError { - - if o.ServiceSettings.MaximumLoginAttempts <= 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.login_attempts.app_error", nil, "") +func (ts *TeamSettings) isValid() *AppError { + if *ts.MaxUsersPerTeam <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.max_users.app_error", nil, "", http.StatusBadRequest) } - if 
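Config.IsValid above now delegates to per-section isValid methods and returns the first failure. A stripped-down sketch of that chaining pattern, with plain errors standing in for *AppError and illustrative section types:

package main

import (
	"errors"
	"fmt"
)

type rateLimit struct{ PerSec int }
type email struct{ BatchingInterval int }

func (r rateLimit) isValid() error {
	if r.PerSec <= 0 {
		return errors.New("rate limit: PerSec must be positive")
	}
	return nil
}

func (e email) isValid() error {
	if e.BatchingInterval < 30 {
		return errors.New("email: batching interval must be at least 30 seconds")
	}
	return nil
}

type config struct {
	RateLimit rateLimit
	Email     email
}

// isValid chains the per-section validators and stops at the first error,
// mirroring how Config.IsValid delegates to TeamSettings.isValid,
// SqlSettings.isValid, and so on.
func (c config) isValid() error {
	if err := c.RateLimit.isValid(); err != nil {
		return err
	}
	return c.Email.isValid()
}

func main() {
	fmt.Println(config{rateLimit{10}, email{15}}.isValid()) // email: batching interval must be at least 30 seconds
}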
len(*o.ServiceSettings.SiteURL) != 0 { - if _, err := url.ParseRequestURI(*o.ServiceSettings.SiteURL); err != nil { - return NewLocAppError("Config.IsValid", "model.config.is_valid.site_url.app_error", nil, "") - } + if *ts.MaxChannelsPerTeam <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.max_channels.app_error", nil, "", http.StatusBadRequest) } - if len(o.ServiceSettings.ListenAddress) == 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.listen_address.app_error", nil, "") + if *ts.MaxNotificationsPerChannel <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.max_notify_per_channel.app_error", nil, "", http.StatusBadRequest) } - if *o.ClusterSettings.Enable && *o.EmailSettings.EnableEmailBatching { - return NewLocAppError("Config.IsValid", "model.config.is_valid.cluster_email_batching.app_error", nil, "") + if !(*ts.RestrictDirectMessage == DIRECT_MESSAGE_ANY || *ts.RestrictDirectMessage == DIRECT_MESSAGE_TEAM) { + return NewAppError("Config.IsValid", "model.config.is_valid.restrict_direct_message.app_error", nil, "", http.StatusBadRequest) } - if len(*o.ServiceSettings.SiteURL) == 0 && *o.EmailSettings.EnableEmailBatching { - return NewLocAppError("Config.IsValid", "model.config.is_valid.site_url_email_batching.app_error", nil, "") + if !(*ts.TeammateNameDisplay == SHOW_FULLNAME || *ts.TeammateNameDisplay == SHOW_NICKNAME_FULLNAME || *ts.TeammateNameDisplay == SHOW_USERNAME) { + return NewAppError("Config.IsValid", "model.config.is_valid.teammate_name_display.app_error", nil, "", http.StatusBadRequest) } - if o.TeamSettings.MaxUsersPerTeam <= 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.max_users.app_error", nil, "") + if len(ts.SiteName) > SITENAME_MAX_LENGTH { + return NewAppError("Config.IsValid", "model.config.is_valid.sitename_length.app_error", map[string]interface{}{"MaxLength": SITENAME_MAX_LENGTH}, "", http.StatusBadRequest) } - if *o.TeamSettings.MaxChannelsPerTeam <= 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.max_channels.app_error", nil, "") - } + return nil +} - if *o.TeamSettings.MaxNotificationsPerChannel <= 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.max_notify_per_channel.app_error", nil, "") +func (ss *SqlSettings) isValid() *AppError { + if len(ss.AtRestEncryptKey) < 32 { + return NewAppError("Config.IsValid", "model.config.is_valid.encrypt_sql.app_error", nil, "", http.StatusBadRequest) } - if !(*o.TeamSettings.RestrictDirectMessage == DIRECT_MESSAGE_ANY || *o.TeamSettings.RestrictDirectMessage == DIRECT_MESSAGE_TEAM) { - return NewLocAppError("Config.IsValid", "model.config.is_valid.restrict_direct_message.app_error", nil, "") + if !(*ss.DriverName == DATABASE_DRIVER_MYSQL || *ss.DriverName == DATABASE_DRIVER_POSTGRES) { + return NewAppError("Config.IsValid", "model.config.is_valid.sql_driver.app_error", nil, "", http.StatusBadRequest) } - if !(*o.TeamSettings.TeammateNameDisplay == SHOW_FULLNAME || *o.TeamSettings.TeammateNameDisplay == SHOW_NICKNAME_FULLNAME || *o.TeamSettings.TeammateNameDisplay == SHOW_USERNAME) { - return NewLocAppError("Config.IsValid", "model.config.is_valid.teammate_name_display.app_error", nil, "") + if *ss.MaxIdleConns <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.sql_idle.app_error", nil, "", http.StatusBadRequest) } - if len(o.SqlSettings.AtRestEncryptKey) < 32 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.encrypt_sql.app_error", nil, "") + if *ss.QueryTimeout <= 
0 { + return NewAppError("Config.IsValid", "model.config.is_valid.sql_query_timeout.app_error", nil, "", http.StatusBadRequest) } - if !(o.SqlSettings.DriverName == DATABASE_DRIVER_MYSQL || o.SqlSettings.DriverName == DATABASE_DRIVER_POSTGRES) { - return NewLocAppError("Config.IsValid", "model.config.is_valid.sql_driver.app_error", nil, "") + if len(*ss.DataSource) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.sql_data_src.app_error", nil, "", http.StatusBadRequest) } - if o.SqlSettings.MaxIdleConns <= 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.sql_idle.app_error", nil, "") + if *ss.MaxOpenConns <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.sql_max_conn.app_error", nil, "", http.StatusBadRequest) } - if *o.SqlSettings.QueryTimeout <= 0 { - return NewAppError("Config.IsValid", "model.config.is_valid.sql_query_timeout.app_error", nil, "", http.StatusBadRequest) - } + return nil +} - if len(o.SqlSettings.DataSource) == 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.sql_data_src.app_error", nil, "") +func (fs *FileSettings) isValid() *AppError { + if *fs.MaxFileSize <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.max_file_size.app_error", nil, "", http.StatusBadRequest) } - if o.SqlSettings.MaxOpenConns <= 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.sql_max_conn.app_error", nil, "") + if !(*fs.DriverName == IMAGE_DRIVER_LOCAL || *fs.DriverName == IMAGE_DRIVER_S3) { + return NewAppError("Config.IsValid", "model.config.is_valid.file_driver.app_error", nil, "", http.StatusBadRequest) } - if *o.FileSettings.MaxFileSize <= 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.max_file_size.app_error", nil, "") + if len(*fs.PublicLinkSalt) < 32 { + return NewAppError("Config.IsValid", "model.config.is_valid.file_salt.app_error", nil, "", http.StatusBadRequest) } - if !(o.FileSettings.DriverName == IMAGE_DRIVER_LOCAL || o.FileSettings.DriverName == IMAGE_DRIVER_S3) { - return NewLocAppError("Config.IsValid", "model.config.is_valid.file_driver.app_error", nil, "") - } + return nil +} - if len(*o.FileSettings.PublicLinkSalt) < 32 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.file_salt.app_error", nil, "") +func (es *EmailSettings) isValid() *AppError { + if !(es.ConnectionSecurity == CONN_SECURITY_NONE || es.ConnectionSecurity == CONN_SECURITY_TLS || es.ConnectionSecurity == CONN_SECURITY_STARTTLS || es.ConnectionSecurity == CONN_SECURITY_PLAIN) { + return NewAppError("Config.IsValid", "model.config.is_valid.email_security.app_error", nil, "", http.StatusBadRequest) } - if !(o.EmailSettings.ConnectionSecurity == CONN_SECURITY_NONE || o.EmailSettings.ConnectionSecurity == CONN_SECURITY_TLS || o.EmailSettings.ConnectionSecurity == CONN_SECURITY_STARTTLS || o.EmailSettings.ConnectionSecurity == CONN_SECURITY_PLAIN) { - return NewLocAppError("Config.IsValid", "model.config.is_valid.email_security.app_error", nil, "") + if len(es.InviteSalt) < 32 { + return NewAppError("Config.IsValid", "model.config.is_valid.email_salt.app_error", nil, "", http.StatusBadRequest) } - if len(o.EmailSettings.InviteSalt) < 32 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.email_salt.app_error", nil, "") + if *es.EmailBatchingBufferSize <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.email_batching_buffer_size.app_error", nil, "", http.StatusBadRequest) } - if *o.EmailSettings.EmailBatchingBufferSize <= 0 { - 
return NewLocAppError("Config.IsValid", "model.config.is_valid.email_batching_buffer_size.app_error", nil, "") + if *es.EmailBatchingInterval < 30 { + return NewAppError("Config.IsValid", "model.config.is_valid.email_batching_interval.app_error", nil, "", http.StatusBadRequest) } - if *o.EmailSettings.EmailBatchingInterval < 30 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.email_batching_interval.app_error", nil, "") + if !(*es.EmailNotificationContentsType == EMAIL_NOTIFICATION_CONTENTS_FULL || *es.EmailNotificationContentsType == EMAIL_NOTIFICATION_CONTENTS_GENERIC) { + return NewAppError("Config.IsValid", "model.config.is_valid.email_notification_contents_type.app_error", nil, "", http.StatusBadRequest) } - if !(*o.EmailSettings.EmailNotificationContentsType == EMAIL_NOTIFICATION_CONTENTS_FULL || *o.EmailSettings.EmailNotificationContentsType == EMAIL_NOTIFICATION_CONTENTS_GENERIC) { - return NewLocAppError("Config.IsValid", "model.config.is_valid.email_notification_contents_type.app_error", nil, "") + return nil +} + +func (rls *RateLimitSettings) isValid() *AppError { + if *rls.MemoryStoreSize <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.rate_mem.app_error", nil, "", http.StatusBadRequest) } - if o.RateLimitSettings.MemoryStoreSize <= 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.rate_mem.app_error", nil, "") + if *rls.PerSec <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.rate_sec.app_error", nil, "", http.StatusBadRequest) } - if o.RateLimitSettings.PerSec <= 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.rate_sec.app_error", nil, "") + if *rls.MaxBurst <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.max_burst.app_error", nil, "", http.StatusBadRequest) } - if !(*o.LdapSettings.ConnectionSecurity == CONN_SECURITY_NONE || *o.LdapSettings.ConnectionSecurity == CONN_SECURITY_TLS || *o.LdapSettings.ConnectionSecurity == CONN_SECURITY_STARTTLS) { - return NewLocAppError("Config.IsValid", "model.config.is_valid.ldap_security.app_error", nil, "") + return nil +} + +func (ls *LdapSettings) isValid() *AppError { + if !(*ls.ConnectionSecurity == CONN_SECURITY_NONE || *ls.ConnectionSecurity == CONN_SECURITY_TLS || *ls.ConnectionSecurity == CONN_SECURITY_STARTTLS) { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_security.app_error", nil, "", http.StatusBadRequest) } - if *o.LdapSettings.SyncIntervalMinutes <= 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.ldap_sync_interval.app_error", nil, "") + if *ls.SyncIntervalMinutes <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_sync_interval.app_error", nil, "", http.StatusBadRequest) } - if *o.LdapSettings.MaxPageSize < 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.ldap_max_page_size.app_error", nil, "") + if *ls.MaxPageSize < 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_max_page_size.app_error", nil, "", http.StatusBadRequest) } - if *o.LdapSettings.Enable { - if *o.LdapSettings.LdapServer == "" { - return NewLocAppError("Config.IsValid", "model.config.is_valid.ldap_server", nil, "") + if *ls.Enable { + if *ls.LdapServer == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_server", nil, "", http.StatusBadRequest) } - if *o.LdapSettings.BaseDN == "" { - return NewLocAppError("Config.IsValid", "model.config.is_valid.ldap_basedn", nil, "") + if *ls.BaseDN == "" { + return 
NewAppError("Config.IsValid", "model.config.is_valid.ldap_basedn", nil, "", http.StatusBadRequest) } - if *o.LdapSettings.EmailAttribute == "" { - return NewLocAppError("Config.IsValid", "model.config.is_valid.ldap_email", nil, "") + if *ls.EmailAttribute == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_email", nil, "", http.StatusBadRequest) } - if *o.LdapSettings.UsernameAttribute == "" { - return NewLocAppError("Config.IsValid", "model.config.is_valid.ldap_username", nil, "") + if *ls.UsernameAttribute == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_username", nil, "", http.StatusBadRequest) } - if *o.LdapSettings.IdAttribute == "" { - return NewLocAppError("Config.IsValid", "model.config.is_valid.ldap_id", nil, "") + if *ls.IdAttribute == "" { + return NewAppError("Config.IsValid", "model.config.is_valid.ldap_id", nil, "", http.StatusBadRequest) } } - if *o.SamlSettings.Enable { - if len(*o.SamlSettings.IdpUrl) == 0 || !IsValidHttpUrl(*o.SamlSettings.IdpUrl) { - return NewLocAppError("Config.IsValid", "model.config.is_valid.saml_idp_url.app_error", nil, "") + return nil +} + +func (ss *SamlSettings) isValid() *AppError { + if *ss.Enable { + if len(*ss.IdpUrl) == 0 || !IsValidHttpUrl(*ss.IdpUrl) { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_idp_url.app_error", nil, "", http.StatusBadRequest) } - if len(*o.SamlSettings.IdpDescriptorUrl) == 0 || !IsValidHttpUrl(*o.SamlSettings.IdpDescriptorUrl) { - return NewLocAppError("Config.IsValid", "model.config.is_valid.saml_idp_descriptor_url.app_error", nil, "") + if len(*ss.IdpDescriptorUrl) == 0 || !IsValidHttpUrl(*ss.IdpDescriptorUrl) { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_idp_descriptor_url.app_error", nil, "", http.StatusBadRequest) } - if len(*o.SamlSettings.IdpCertificateFile) == 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.saml_idp_cert.app_error", nil, "") + if len(*ss.IdpCertificateFile) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_idp_cert.app_error", nil, "", http.StatusBadRequest) } - if len(*o.SamlSettings.EmailAttribute) == 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.saml_email_attribute.app_error", nil, "") + if len(*ss.EmailAttribute) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_email_attribute.app_error", nil, "", http.StatusBadRequest) } - if len(*o.SamlSettings.UsernameAttribute) == 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.saml_username_attribute.app_error", nil, "") + if len(*ss.UsernameAttribute) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_username_attribute.app_error", nil, "", http.StatusBadRequest) } - if *o.SamlSettings.Verify { - if len(*o.SamlSettings.AssertionConsumerServiceURL) == 0 || !IsValidHttpUrl(*o.SamlSettings.AssertionConsumerServiceURL) { - return NewLocAppError("Config.IsValid", "model.config.is_valid.saml_assertion_consumer_service_url.app_error", nil, "") + if *ss.Verify { + if len(*ss.AssertionConsumerServiceURL) == 0 || !IsValidHttpUrl(*ss.AssertionConsumerServiceURL) { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_assertion_consumer_service_url.app_error", nil, "", http.StatusBadRequest) } } - if *o.SamlSettings.Encrypt { - if len(*o.SamlSettings.PrivateKeyFile) == 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.saml_private_key.app_error", nil, "") + if *ss.Encrypt { + if 
len(*ss.PrivateKeyFile) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_private_key.app_error", nil, "", http.StatusBadRequest) } - if len(*o.SamlSettings.PublicCertificateFile) == 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.saml_public_cert.app_error", nil, "") + if len(*ss.PublicCertificateFile) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_public_cert.app_error", nil, "", http.StatusBadRequest) } } - if len(*o.SamlSettings.EmailAttribute) == 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.saml_email_attribute.app_error", nil, "") + if len(*ss.EmailAttribute) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.saml_email_attribute.app_error", nil, "", http.StatusBadRequest) } } - if *o.PasswordSettings.MinimumLength < PASSWORD_MINIMUM_LENGTH || *o.PasswordSettings.MinimumLength > PASSWORD_MAXIMUM_LENGTH { - return NewLocAppError("Config.IsValid", "model.config.is_valid.password_length.app_error", map[string]interface{}{"MinLength": PASSWORD_MINIMUM_LENGTH, "MaxLength": PASSWORD_MAXIMUM_LENGTH}, "") + return nil +} + +func (ws *WebrtcSettings) isValid() *AppError { + if *ws.Enable { + if len(*ws.GatewayWebsocketUrl) == 0 || !IsValidWebsocketUrl(*ws.GatewayWebsocketUrl) { + return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_gateway_ws_url.app_error", nil, "", http.StatusBadRequest) + } else if len(*ws.GatewayAdminUrl) == 0 || !IsValidHttpUrl(*ws.GatewayAdminUrl) { + return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_gateway_admin_url.app_error", nil, "", http.StatusBadRequest) + } else if len(*ws.GatewayAdminSecret) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_gateway_admin_secret.app_error", nil, "", http.StatusBadRequest) + } else if len(*ws.StunURI) != 0 && !IsValidTurnOrStunServer(*ws.StunURI) { + return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_stun_uri.app_error", nil, "", http.StatusBadRequest) + } else if len(*ws.TurnURI) != 0 { + if !IsValidTurnOrStunServer(*ws.TurnURI) { + return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_turn_uri.app_error", nil, "", http.StatusBadRequest) + } + if len(*ws.TurnUsername) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_turn_username.app_error", nil, "", http.StatusBadRequest) + } else if len(*ws.TurnSharedKey) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.webrtc_turn_shared_key.app_error", nil, "", http.StatusBadRequest) + } + } } - if len(o.TeamSettings.SiteName) > SITENAME_MAX_LENGTH { - return NewLocAppError("Config.IsValid", "model.config.is_valid.sitename_length.app_error", map[string]interface{}{"MaxLength": SITENAME_MAX_LENGTH}, "") + return nil +} + +func (ss *ServiceSettings) isValid() *AppError { + if !(*ss.ConnectionSecurity == CONN_SECURITY_NONE || *ss.ConnectionSecurity == CONN_SECURITY_TLS) { + return NewAppError("Config.IsValid", "model.config.is_valid.webserver_security.app_error", nil, "", http.StatusBadRequest) } - if *o.RateLimitSettings.MaxBurst <= 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.max_burst.app_error", nil, "") + if *ss.ReadTimeout <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.read_timeout.app_error", nil, "", http.StatusBadRequest) } - if err := o.isValidWebrtcSettings(); err != nil { - return err + if *ss.WriteTimeout <= 0 { + return NewAppError("Config.IsValid", 
"model.config.is_valid.write_timeout.app_error", nil, "", http.StatusBadRequest) } - if !(*o.ServiceSettings.ConnectionSecurity == CONN_SECURITY_NONE || *o.ServiceSettings.ConnectionSecurity == CONN_SECURITY_TLS) { - return NewLocAppError("Config.IsValid", "model.config.is_valid.webserver_security.app_error", nil, "") + if *ss.TimeBetweenUserTypingUpdatesMilliseconds < 1000 { + return NewAppError("Config.IsValid", "model.config.is_valid.time_between_user_typing.app_error", nil, "", http.StatusBadRequest) } - if *o.ServiceSettings.ReadTimeout <= 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.read_timeout.app_error", nil, "") + if *ss.MaximumLoginAttempts <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.login_attempts.app_error", nil, "", http.StatusBadRequest) } - if *o.ServiceSettings.WriteTimeout <= 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.write_timeout.app_error", nil, "") + if len(*ss.SiteURL) != 0 { + if _, err := url.ParseRequestURI(*ss.SiteURL); err != nil { + return NewAppError("Config.IsValid", "model.config.is_valid.site_url.app_error", nil, "", http.StatusBadRequest) + } } - if *o.ServiceSettings.TimeBetweenUserTypingUpdatesMilliseconds < 1000 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.time_between_user_typing.app_error", nil, "") + if len(*ss.ListenAddress) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.listen_address.app_error", nil, "", http.StatusBadRequest) } - if *o.ElasticsearchSettings.EnableIndexing { - if len(*o.ElasticsearchSettings.ConnectionUrl) == 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.elastic_search.connection_url.app_error", nil, "") + return nil +} + +func (ess *ElasticsearchSettings) isValid() *AppError { + if *ess.EnableIndexing { + if len(*ess.ConnectionUrl) == 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.connection_url.app_error", nil, "", http.StatusBadRequest) } } - if *o.ElasticsearchSettings.EnableSearching && !*o.ElasticsearchSettings.EnableIndexing { - return NewLocAppError("Config.IsValid", "model.config.is_valid.elastic_search.enable_searching.app_error", nil, "") + if *ess.EnableSearching && !*ess.EnableIndexing { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.enable_searching.app_error", nil, "", http.StatusBadRequest) + } + + if *ess.AggregatePostsAfterDays < 1 { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.aggregate_posts_after_days.app_error", nil, "", http.StatusBadRequest) + } + + if _, err := time.Parse("15:04", *ess.PostsAggregatorJobStartTime); err != nil { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.posts_aggregator_job_start_time.app_error", nil, err.Error(), http.StatusBadRequest) + } + + if *ess.LiveIndexingBatchSize < 1 { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.live_indexing_batch_size.app_error", nil, "", http.StatusBadRequest) } + if *ess.BulkIndexingTimeWindowSeconds < 1 { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.bulk_indexing_time_window_seconds.app_error", nil, "", http.StatusBadRequest) + } + + if *ess.RequestTimeoutSeconds < 1 { + return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.request_timeout_seconds.app_error", nil, "", http.StatusBadRequest) + } + + return nil +} + +func (drs *DataRetentionSettings) isValid() *AppError { + if *drs.MessageRetentionDays <= 0 
{ + return NewAppError("Config.IsValid", "model.config.is_valid.data_retention.message_retention_days_too_low.app_error", nil, "", http.StatusBadRequest) + } + + if *drs.FileRetentionDays <= 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.data_retention.file_retention_days_too_low.app_error", nil, "", http.StatusBadRequest) + } + + if _, err := time.Parse("15:04", *drs.DeletionJobStartTime); err != nil { + return NewAppError("Config.IsValid", "model.config.is_valid.data_retention.deletion_job_start_time.app_error", nil, err.Error(), http.StatusBadRequest) + } + + return nil +} + +func (ls *LocalizationSettings) isValid() *AppError { + if len(*ls.AvailableLocales) > 0 { + if !strings.Contains(*ls.AvailableLocales, *ls.DefaultClientLocale) { + return NewAppError("Config.IsValid", "model.config.is_valid.localization.available_locales.app_error", nil, "", http.StatusBadRequest) + } + } + + return nil +} + +func (mes *MessageExportSettings) isValid(fs FileSettings) *AppError { + if mes.EnableExport == nil { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.enable.app_error", nil, "", http.StatusBadRequest) + } + if *mes.EnableExport { + if mes.ExportFromTimestamp == nil || *mes.ExportFromTimestamp < 0 || *mes.ExportFromTimestamp > GetMillis() { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.export_from.app_error", nil, "", http.StatusBadRequest) + } else if mes.DailyRunTime == nil { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.daily_runtime.app_error", nil, "", http.StatusBadRequest) + } else if _, err := time.Parse("15:04", *mes.DailyRunTime); err != nil { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.daily_runtime.app_error", nil, err.Error(), http.StatusBadRequest) + } else if mes.BatchSize == nil || *mes.BatchSize < 0 { + return NewAppError("Config.IsValid", "model.config.is_valid.message_export.batch_size.app_error", nil, "", http.StatusBadRequest) + } + } return nil } @@ -1725,7 +2153,7 @@ func (o *Config) Sanitize() { o.GitLabSettings.Secret = FAKE_SETTING } - o.SqlSettings.DataSource = FAKE_SETTING + *o.SqlSettings.DataSource = FAKE_SETTING o.SqlSettings.AtRestEncryptKey = FAKE_SETTING for i := range o.SqlSettings.DataSourceReplicas { @@ -1738,71 +2166,3 @@ func (o *Config) Sanitize() { *o.ElasticsearchSettings.Password = FAKE_SETTING } - -func (o *Config) defaultWebrtcSettings() { - if o.WebrtcSettings.Enable == nil { - o.WebrtcSettings.Enable = new(bool) - *o.WebrtcSettings.Enable = false - } - - if o.WebrtcSettings.GatewayWebsocketUrl == nil { - o.WebrtcSettings.GatewayWebsocketUrl = new(string) - *o.WebrtcSettings.GatewayWebsocketUrl = "" - } - - if o.WebrtcSettings.GatewayAdminUrl == nil { - o.WebrtcSettings.GatewayAdminUrl = new(string) - *o.WebrtcSettings.GatewayAdminUrl = "" - } - - if o.WebrtcSettings.GatewayAdminSecret == nil { - o.WebrtcSettings.GatewayAdminSecret = new(string) - *o.WebrtcSettings.GatewayAdminSecret = "" - } - - if o.WebrtcSettings.StunURI == nil { - o.WebrtcSettings.StunURI = new(string) - *o.WebrtcSettings.StunURI = WEBRTC_SETTINGS_DEFAULT_STUN_URI - } - - if o.WebrtcSettings.TurnURI == nil { - o.WebrtcSettings.TurnURI = new(string) - *o.WebrtcSettings.TurnURI = WEBRTC_SETTINGS_DEFAULT_TURN_URI - } - - if o.WebrtcSettings.TurnUsername == nil { - o.WebrtcSettings.TurnUsername = new(string) - *o.WebrtcSettings.TurnUsername = "" - } - - if o.WebrtcSettings.TurnSharedKey == nil { - o.WebrtcSettings.TurnSharedKey = new(string) 
- *o.WebrtcSettings.TurnSharedKey = "" - } -} - -func (o *Config) isValidWebrtcSettings() *AppError { - if *o.WebrtcSettings.Enable { - if len(*o.WebrtcSettings.GatewayWebsocketUrl) == 0 || !IsValidWebsocketUrl(*o.WebrtcSettings.GatewayWebsocketUrl) { - return NewLocAppError("Config.IsValid", "model.config.is_valid.webrtc_gateway_ws_url.app_error", nil, "") - } else if len(*o.WebrtcSettings.GatewayAdminUrl) == 0 || !IsValidHttpUrl(*o.WebrtcSettings.GatewayAdminUrl) { - return NewLocAppError("Config.IsValid", "model.config.is_valid.webrtc_gateway_admin_url.app_error", nil, "") - } else if len(*o.WebrtcSettings.GatewayAdminSecret) == 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.webrtc_gateway_admin_secret.app_error", nil, "") - } else if len(*o.WebrtcSettings.StunURI) != 0 && !IsValidTurnOrStunServer(*o.WebrtcSettings.StunURI) { - return NewLocAppError("Config.IsValid", "model.config.is_valid.webrtc_stun_uri.app_error", nil, "") - } else if len(*o.WebrtcSettings.TurnURI) != 0 { - if !IsValidTurnOrStunServer(*o.WebrtcSettings.TurnURI) { - return NewLocAppError("Config.IsValid", "model.config.is_valid.webrtc_turn_uri.app_error", nil, "") - } - if len(*o.WebrtcSettings.TurnUsername) == 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.webrtc_turn_username.app_error", nil, "") - } else if len(*o.WebrtcSettings.TurnSharedKey) == 0 { - return NewLocAppError("Config.IsValid", "model.config.is_valid.webrtc_turn_shared_key.app_error", nil, "") - } - - } - } - - return nil -} diff --git a/vendor/github.com/mattermost/platform/model/data_retention_policy.go b/vendor/github.com/mattermost/platform/model/data_retention_policy.go new file mode 100644 index 00000000..7284477e --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/data_retention_policy.go @@ -0,0 +1,36 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
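The config.go hunk above breaks the old monolithic Config.IsValid into per-section isValid() methods that return an *AppError carrying http.StatusBadRequest instead of the untranslated NewLocAppError calls. A minimal sketch of that shape, using hypothetical local types rather than the vendored package:

package main

import (
	"fmt"
	"net/http"
)

// appError, teamSettings, and config are illustrative stand-ins for the real
// model types; only the shape of the check is the same.
type appError struct {
	Where      string
	Id         string
	StatusCode int
}

type teamSettings struct {
	MaxUsersPerTeam *int
}

func (ts *teamSettings) isValid() *appError {
	if *ts.MaxUsersPerTeam <= 0 {
		return &appError{"Config.IsValid", "model.config.is_valid.max_users.app_error", http.StatusBadRequest}
	}
	return nil
}

type config struct {
	TeamSettings teamSettings
}

// IsValid fans out to each settings section and returns the first failure.
func (c *config) IsValid() *appError {
	if err := c.TeamSettings.isValid(); err != nil {
		return err
	}
	return nil
}

func main() {
	users := 0
	cfg := config{TeamSettings: teamSettings{MaxUsersPerTeam: &users}}
	if err := cfg.IsValid(); err != nil {
		fmt.Println(err.Id, err.StatusCode) // model.config.is_valid.max_users.app_error 400
	}
}

The real methods differ only in which fields they check; the pattern of a pointer-heavy settings struct, an early return with a translated error id, and an explicit HTTP status is the same.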
+ +package model + +import ( + "encoding/json" + "io" +) + +type DataRetentionPolicy struct { + MessageDeletionEnabled bool `json:"message_deletion_enabled"` + FileDeletionEnabled bool `json:"file_deletion_enabled"` + MessageRetentionCutoff int64 `json:"message_retention_cutoff"` + FileRetentionCutoff int64 `json:"file_retention_cutoff"` +} + +func (me *DataRetentionPolicy) ToJson() string { + b, err := json.Marshal(me) + if err != nil { + return "" + } else { + return string(b) + } +} + +func DataRetentionPolicyFromJson(data io.Reader) *DataRetentionPolicy { + decoder := json.NewDecoder(data) + var me DataRetentionPolicy + err := decoder.Decode(&me) + if err == nil { + return &me + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/emoji.go b/vendor/github.com/mattermost/platform/model/emoji.go index 5ade868c..272616d9 100644 --- a/vendor/github.com/mattermost/platform/model/emoji.go +++ b/vendor/github.com/mattermost/platform/model/emoji.go @@ -6,6 +6,7 @@ package model import ( "encoding/json" "io" + "net/http" ) type Emoji struct { @@ -19,23 +20,23 @@ type Emoji struct { func (emoji *Emoji) IsValid() *AppError { if len(emoji.Id) != 26 { - return NewLocAppError("Emoji.IsValid", "model.emoji.id.app_error", nil, "") + return NewAppError("Emoji.IsValid", "model.emoji.id.app_error", nil, "", http.StatusBadRequest) } if emoji.CreateAt == 0 { - return NewLocAppError("Emoji.IsValid", "model.emoji.create_at.app_error", nil, "id="+emoji.Id) + return NewAppError("Emoji.IsValid", "model.emoji.create_at.app_error", nil, "id="+emoji.Id, http.StatusBadRequest) } if emoji.UpdateAt == 0 { - return NewLocAppError("Emoji.IsValid", "model.emoji.update_at.app_error", nil, "id="+emoji.Id) + return NewAppError("Emoji.IsValid", "model.emoji.update_at.app_error", nil, "id="+emoji.Id, http.StatusBadRequest) } if len(emoji.CreatorId) != 26 { - return NewLocAppError("Emoji.IsValid", "model.emoji.user_id.app_error", nil, "") + return NewAppError("Emoji.IsValid", "model.emoji.user_id.app_error", nil, "", http.StatusBadRequest) } if len(emoji.Name) == 0 || len(emoji.Name) > 64 || !IsValidAlphaNumHyphenUnderscore(emoji.Name, false) { - return NewLocAppError("Emoji.IsValid", "model.emoji.name.app_error", nil, "") + return NewAppError("Emoji.IsValid", "model.emoji.name.app_error", nil, "", http.StatusBadRequest) } return nil diff --git a/vendor/github.com/mattermost/platform/model/file_info.go b/vendor/github.com/mattermost/platform/model/file_info.go index 8b568412..0ee2c50d 100644 --- a/vendor/github.com/mattermost/platform/model/file_info.go +++ b/vendor/github.com/mattermost/platform/model/file_info.go @@ -10,6 +10,7 @@ import ( "image/gif" "io" "mime" + "net/http" "path/filepath" "strings" ) @@ -80,33 +81,36 @@ func (o *FileInfo) PreSave() { if o.CreateAt == 0 { o.CreateAt = GetMillis() + } + + if o.UpdateAt < o.CreateAt { o.UpdateAt = o.CreateAt } } func (o *FileInfo) IsValid() *AppError { if len(o.Id) != 26 { - return NewLocAppError("FileInfo.IsValid", "model.file_info.is_valid.id.app_error", nil, "") + return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.id.app_error", nil, "", http.StatusBadRequest) } if len(o.CreatorId) != 26 { - return NewLocAppError("FileInfo.IsValid", "model.file_info.is_valid.user_id.app_error", nil, "id="+o.Id) + return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.user_id.app_error", nil, "id="+o.Id, http.StatusBadRequest) } if len(o.PostId) != 0 && len(o.PostId) != 26 { - return NewLocAppError("FileInfo.IsValid", 
"model.file_info.is_valid.post_id.app_error", nil, "id="+o.Id) + return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.post_id.app_error", nil, "id="+o.Id, http.StatusBadRequest) } if o.CreateAt == 0 { - return NewLocAppError("FileInfo.IsValid", "model.file_info.is_valid.create_at.app_error", nil, "id="+o.Id) + return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) } if o.UpdateAt == 0 { - return NewLocAppError("FileInfo.IsValid", "model.file_info.is_valid.update_at.app_error", nil, "id="+o.Id) + return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) } if o.Path == "" { - return NewLocAppError("FileInfo.IsValid", "model.file_info.is_valid.path.app_error", nil, "id="+o.Id) + return NewAppError("FileInfo.IsValid", "model.file_info.is_valid.path.app_error", nil, "id="+o.Id, http.StatusBadRequest) } return nil @@ -144,7 +148,7 @@ func GetInfoForBytes(name string, data []byte) (*FileInfo, *AppError) { if gifConfig, err := gif.DecodeAll(bytes.NewReader(data)); err != nil { // Still return the rest of the info even though it doesn't appear to be an actual gif info.HasPreviewImage = true - err = NewLocAppError("GetInfoForBytes", "model.file_info.get.gif.app_error", nil, "name="+name) + err = NewAppError("GetInfoForBytes", "model.file_info.get.gif.app_error", nil, "name="+name, http.StatusBadRequest) } else { info.HasPreviewImage = len(gifConfig.Image) == 1 } diff --git a/vendor/github.com/mattermost/platform/model/gitlab/gitlab.go b/vendor/github.com/mattermost/platform/model/gitlab/gitlab.go index 7f1447ed..7e0cb10a 100644 --- a/vendor/github.com/mattermost/platform/model/gitlab/gitlab.go +++ b/vendor/github.com/mattermost/platform/model/gitlab/gitlab.go @@ -5,11 +5,12 @@ package oauthgitlab import ( "encoding/json" - "github.com/mattermost/platform/einterfaces" - "github.com/mattermost/platform/model" "io" "strconv" "strings" + + "github.com/mattermost/mattermost-server/einterfaces" + "github.com/mattermost/mattermost-server/model" ) type GitLabProvider struct { @@ -45,7 +46,6 @@ func userFromGitLabUser(glu *GitLabUser) *model.User { } else { user.FirstName = glu.Name } - strings.TrimSpace(user.Email) user.Email = glu.Email userId := strconv.FormatInt(glu.Id, 10) user.AuthData = &userId diff --git a/vendor/github.com/mattermost/platform/model/incoming_webhook.go b/vendor/github.com/mattermost/platform/model/incoming_webhook.go index ce755f88..3e0488d2 100644 --- a/vendor/github.com/mattermost/platform/model/incoming_webhook.go +++ b/vendor/github.com/mattermost/platform/model/incoming_webhook.go @@ -25,6 +25,8 @@ type IncomingWebhook struct { TeamId string `json:"team_id"` DisplayName string `json:"display_name"` Description string `json:"description"` + Username string `json:"username"` + IconURL string `json:"icon_url"` } type IncomingWebhookRequest struct { @@ -112,6 +114,14 @@ func (o *IncomingWebhook) IsValid() *AppError { return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.description.app_error", nil, "", http.StatusBadRequest) } + if len(o.Username) > 64 { + return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.username.app_error", nil, "", http.StatusBadRequest) + } + + if len(o.IconURL) > 1024 { + return NewAppError("IncomingWebhook.IsValid", "model.incoming_hook.icon_url.app_error", nil, "", http.StatusBadRequest) + } + return nil } @@ -193,7 +203,7 @@ func decodeIncomingWebhookRequest(by []byte) 
(*IncomingWebhookRequest, error) { } } -func IncomingWebhookRequestFromJson(data io.Reader) *IncomingWebhookRequest { +func IncomingWebhookRequestFromJson(data io.Reader) (*IncomingWebhookRequest, *AppError) { buf := new(bytes.Buffer) buf.ReadFrom(data) by := buf.Bytes() @@ -204,12 +214,11 @@ func IncomingWebhookRequestFromJson(data io.Reader) *IncomingWebhookRequest { if err != nil { o, err = decodeIncomingWebhookRequest(escapeControlCharsFromPayload(by)) if err != nil { - return nil + return nil, NewAppError("IncomingWebhookRequestFromJson", "Unable to parse incoming data", nil, err.Error(), http.StatusBadRequest) } } - o.Text = ExpandAnnouncement(o.Text) - o.Attachments = ProcessSlackAttachments(o.Attachments) + o.Attachments = StringifySlackFieldValue(o.Attachments) - return o + return o, nil } diff --git a/vendor/github.com/mattermost/platform/model/job.go b/vendor/github.com/mattermost/platform/model/job.go index 004331a1..9a756602 100644 --- a/vendor/github.com/mattermost/platform/model/job.go +++ b/vendor/github.com/mattermost/platform/model/job.go @@ -7,11 +7,15 @@ import ( "encoding/json" "io" "net/http" + "time" ) const ( - JOB_TYPE_DATA_RETENTION = "data_retention" - JOB_TYPE_ELASTICSEARCH_POST_INDEXING = "elasticsearch_post_indexing" + JOB_TYPE_DATA_RETENTION = "data_retention" + JOB_TYPE_MESSAGE_EXPORT = "message_export" + JOB_TYPE_ELASTICSEARCH_POST_INDEXING = "elasticsearch_post_indexing" + JOB_TYPE_ELASTICSEARCH_POST_AGGREGATION = "elasticsearch_post_aggregation" + JOB_TYPE_LDAP_SYNC = "ldap_sync" JOB_STATUS_PENDING = "pending" JOB_STATUS_IN_PROGRESS = "in_progress" @@ -22,15 +26,15 @@ const ( ) type Job struct { - Id string `json:"id"` - Type string `json:"type"` - Priority int64 `json:"priority"` - CreateAt int64 `json:"create_at"` - StartAt int64 `json:"start_at"` - LastActivityAt int64 `json:"last_activity_at"` - Status string `json:"status"` - Progress int64 `json:"progress"` - Data map[string]interface{} `json:"data"` + Id string `json:"id"` + Type string `json:"type"` + Priority int64 `json:"priority"` + CreateAt int64 `json:"create_at"` + StartAt int64 `json:"start_at"` + LastActivityAt int64 `json:"last_activity_at"` + Status string `json:"status"` + Progress int64 `json:"progress"` + Data map[string]string `json:"data"` } func (j *Job) IsValid() *AppError { @@ -45,6 +49,9 @@ func (j *Job) IsValid() *AppError { switch j.Type { case JOB_TYPE_DATA_RETENTION: case JOB_TYPE_ELASTICSEARCH_POST_INDEXING: + case JOB_TYPE_ELASTICSEARCH_POST_AGGREGATION: + case JOB_TYPE_LDAP_SYNC: + case JOB_TYPE_MESSAGE_EXPORT: default: return NewAppError("Job.IsValid", "model.job.is_valid.type.app_error", nil, "id="+j.Id, http.StatusBadRequest) } @@ -112,6 +119,9 @@ type Worker interface { } type Scheduler interface { - Run() - Stop() + Name() string + JobType() string + Enabled(cfg *Config) bool + NextScheduleTime(cfg *Config, now time.Time, pendingJobs bool, lastSuccessfulJob *Job) *time.Time + ScheduleJob(cfg *Config, pendingJobs bool, lastSuccessfulJob *Job) (*Job, *AppError) } diff --git a/vendor/github.com/mattermost/platform/model/license.go b/vendor/github.com/mattermost/platform/model/license.go index ea108972..a81f882c 100644 --- a/vendor/github.com/mattermost/platform/model/license.go +++ b/vendor/github.com/mattermost/platform/model/license.go @@ -6,6 +6,7 @@ package model import ( "encoding/json" "io" + "net/http" ) const ( @@ -51,7 +52,10 @@ type Features struct { PasswordRequirements *bool `json:"password_requirements"` Elasticsearch *bool `json:"elastic_search"` 
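The job.go hunk above also replaces the Run/Stop Scheduler interface with a declarative one built around NextScheduleTime, and several settings validated earlier (DailyRunTime, DeletionJobStartTime, PostsAggregatorJobStartTime) are daily start times in Go's "15:04" layout. A hedged sketch of how a next run could be derived from such a value; nextRunAfter is illustrative and not part of the vendored code:

package main

import (
	"fmt"
	"time"
)

// nextRunAfter turns a "15:04" daily start time (the layout the config
// validation above parses) into the next concrete run time after now.
func nextRunAfter(now time.Time, startTime string) (time.Time, error) {
	t, err := time.Parse("15:04", startTime)
	if err != nil {
		return time.Time{}, err
	}
	next := time.Date(now.Year(), now.Month(), now.Day(), t.Hour(), t.Minute(), 0, 0, now.Location())
	if !next.After(now) {
		// Today's slot has already passed, so schedule for tomorrow.
		next = next.Add(24 * time.Hour)
	}
	return next, nil
}

func main() {
	next, err := nextRunAfter(time.Now(), "02:00")
	fmt.Println(next, err)
}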
Announcement *bool `json:"announcement"` + ThemeManagement *bool `json:"theme_management"` EmailNotificationContents *bool `json:"email_notification_contents"` + DataRetention *bool `json:"data_retention"` + MessageExport *bool `json:"message_export"` // after we enabled more features for webrtc we'll need to control them with this FutureFeatures *bool `json:"future_features"` @@ -72,106 +76,96 @@ func (f *Features) ToMap() map[string]interface{} { "password": *f.PasswordRequirements, "elastic_search": *f.Elasticsearch, "email_notification_contents": *f.EmailNotificationContents, + "data_retention": *f.DataRetention, + "message_export": *f.MessageExport, "future": *f.FutureFeatures, } } func (f *Features) SetDefaults() { if f.FutureFeatures == nil { - f.FutureFeatures = new(bool) - *f.FutureFeatures = true + f.FutureFeatures = NewBool(true) } if f.Users == nil { - f.Users = new(int) - *f.Users = 0 + f.Users = NewInt(0) } if f.LDAP == nil { - f.LDAP = new(bool) - *f.LDAP = *f.FutureFeatures + f.LDAP = NewBool(*f.FutureFeatures) } if f.MFA == nil { - f.MFA = new(bool) - *f.MFA = *f.FutureFeatures + f.MFA = NewBool(*f.FutureFeatures) } if f.GoogleOAuth == nil { - f.GoogleOAuth = new(bool) - *f.GoogleOAuth = *f.FutureFeatures + f.GoogleOAuth = NewBool(*f.FutureFeatures) } if f.Office365OAuth == nil { - f.Office365OAuth = new(bool) - *f.Office365OAuth = *f.FutureFeatures + f.Office365OAuth = NewBool(*f.FutureFeatures) } if f.Compliance == nil { - f.Compliance = new(bool) - *f.Compliance = *f.FutureFeatures + f.Compliance = NewBool(*f.FutureFeatures) } if f.Cluster == nil { - f.Cluster = new(bool) - *f.Cluster = *f.FutureFeatures + f.Cluster = NewBool(*f.FutureFeatures) } if f.Metrics == nil { - f.Metrics = new(bool) - *f.Metrics = *f.FutureFeatures + f.Metrics = NewBool(*f.FutureFeatures) } if f.CustomBrand == nil { - f.CustomBrand = new(bool) - *f.CustomBrand = *f.FutureFeatures + f.CustomBrand = NewBool(*f.FutureFeatures) } if f.MHPNS == nil { - f.MHPNS = new(bool) - *f.MHPNS = *f.FutureFeatures + f.MHPNS = NewBool(*f.FutureFeatures) } if f.SAML == nil { - f.SAML = new(bool) - *f.SAML = *f.FutureFeatures + f.SAML = NewBool(*f.FutureFeatures) } if f.PasswordRequirements == nil { - f.PasswordRequirements = new(bool) - *f.PasswordRequirements = *f.FutureFeatures + f.PasswordRequirements = NewBool(*f.FutureFeatures) } if f.Elasticsearch == nil { - f.Elasticsearch = new(bool) - *f.Elasticsearch = *f.FutureFeatures + f.Elasticsearch = NewBool(*f.FutureFeatures) } if f.Announcement == nil { - f.Announcement = new(bool) - *f.Announcement = true + f.Announcement = NewBool(true) + } + + if f.ThemeManagement == nil { + f.ThemeManagement = NewBool(true) } if f.EmailNotificationContents == nil { - f.EmailNotificationContents = new(bool) - *f.EmailNotificationContents = *f.FutureFeatures + f.EmailNotificationContents = NewBool(*f.FutureFeatures) + } + + if f.DataRetention == nil { + f.DataRetention = NewBool(*f.FutureFeatures) + } + + if f.MessageExport == nil { + f.MessageExport = NewBool(*f.FutureFeatures) } } func (l *License) IsExpired() bool { - now := GetMillis() - if l.ExpiresAt < now { - return true - } - return false + return l.ExpiresAt < GetMillis() } func (l *License) IsStarted() bool { - now := GetMillis() - if l.StartsAt < now { - return true - } - return false + return l.StartsAt < GetMillis() } func (l *License) ToJson() string { @@ -196,15 +190,15 @@ func LicenseFromJson(data io.Reader) *License { func (lr *LicenseRecord) IsValid() *AppError { if len(lr.Id) != 26 { - return 
NewLocAppError("LicenseRecord.IsValid", "model.license_record.is_valid.id.app_error", nil, "") + return NewAppError("LicenseRecord.IsValid", "model.license_record.is_valid.id.app_error", nil, "", http.StatusBadRequest) } if lr.CreateAt == 0 { - return NewLocAppError("LicenseRecord.IsValid", "model.license_record.is_valid.create_at.app_error", nil, "") + return NewAppError("LicenseRecord.IsValid", "model.license_record.is_valid.create_at.app_error", nil, "", http.StatusBadRequest) } if len(lr.Bytes) == 0 || len(lr.Bytes) > 10000 { - return NewLocAppError("LicenseRecord.IsValid", "model.license_record.is_valid.create_at.app_error", nil, "") + return NewAppError("LicenseRecord.IsValid", "model.license_record.is_valid.create_at.app_error", nil, "", http.StatusBadRequest) } return nil diff --git a/vendor/github.com/mattermost/platform/model/manifest.go b/vendor/github.com/mattermost/platform/model/manifest.go new file mode 100644 index 00000000..03d78f84 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/manifest.go @@ -0,0 +1,246 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + + "gopkg.in/yaml.v2" +) + +const ( + PLUGIN_CONFIG_TYPE_TEXT = "text" + PLUGIN_CONFIG_TYPE_BOOL = "bool" + PLUGIN_CONFIG_TYPE_RADIO = "radio" + PLUGIN_CONFIG_TYPE_DROPDOWN = "dropdown" + PLUGIN_CONFIG_TYPE_GENERATED = "generated" + PLUGIN_CONFIG_TYPE_USERNAME = "username" +) + +type PluginOption struct { + // The display name for the option. + DisplayName string `json:"display_name" yaml:"display_name"` + + // The string value for the option. + Value string `json:"value" yaml:"value"` +} + +type PluginSetting struct { + // The key that the setting will be assigned to in the configuration file. + Key string `json:"key" yaml:"key"` + + // The display name for the setting. + DisplayName string `json:"display_name" yaml:"display_name"` + + // The type of the setting. + // + // "bool" will result in a boolean true or false setting. + // + // "dropdown" will result in a string setting that allows the user to select from a list of + // pre-defined options. + // + // "generated" will result in a string setting that is set to a random, cryptographically secure + // string. + // + // "radio" will result in a string setting that allows the user to select from a short selection + // of pre-defined options. + // + // "text" will result in a string setting that can be typed in manually. + // + // "username" will result in a text setting that will autocomplete to a username. + Type string `json:"type" yaml:"type"` + + // The help text to display to the user. + HelpText string `json:"help_text" yaml:"help_text"` + + // The help text to display alongside the "Regenerate" button for settings of the "generated" type. + RegenerateHelpText string `json:"regenerate_help_text,omitempty" yaml:"regenerate_help_text,omitempty"` + + // The placeholder to display for "text", "generated" and "username" types when blank. + Placeholder string `json:"placeholder" yaml:"placeholder"` + + // The default value of the setting. + Default interface{} `json:"default" yaml:"default"` + + // For "radio" or "dropdown" settings, this is the list of pre-defined options that the user can choose + // from. + Options []*PluginOption `json:"options,omitempty" yaml:"options,omitempty"` +} + +type PluginSettingsSchema struct { + // Optional text to display above the settings. 
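The license.go hunk above collapses the two-line pointer defaults into single NewBool/NewInt calls. A small self-contained sketch of that helper pattern, with local stand-ins for the model helpers:

package main

import "fmt"

// newBool and newInt mirror the NewBool/NewInt helpers the hunk switches to:
// allocate and initialize a pointer in one expression.
func newBool(b bool) *bool { return &b }
func newInt(n int) *int    { return &n }

type features struct {
	FutureFeatures *bool
	Users          *int
	LDAP           *bool
}

// setDefaults mirrors the Features.SetDefaults pattern: unset pointers are
// back-filled, feature flags such as LDAP from FutureFeatures.
func (f *features) setDefaults() {
	if f.FutureFeatures == nil {
		f.FutureFeatures = newBool(true)
	}
	if f.Users == nil {
		f.Users = newInt(0)
	}
	if f.LDAP == nil {
		f.LDAP = newBool(*f.FutureFeatures)
	}
}

func main() {
	var f features
	f.setDefaults()
	fmt.Println(*f.FutureFeatures, *f.Users, *f.LDAP) // true 0 true
}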
+ Header string `json:"header" yaml:"header"` + + // Optional text to display below the settings. + Footer string `json:"footer" yaml:"footer"` + + // A list of setting definitions. + Settings []*PluginSetting `json:"settings" yaml:"settings"` +} + +// The plugin manifest defines the metadata required to load and present your plugin. The manifest +// file should be named plugin.json or plugin.yaml and placed in the top of your +// plugin bundle. +// +// Example plugin.yaml: +// +// id: com.mycompany.myplugin +// name: My Plugin +// description: This is my plugin. It does stuff. +// backend: +// executable: myplugin +// settings_schema: +// settings: +// - key: enable_extra_thing +// type: bool +// display_name: Enable Extra Thing +// help_text: When true, an extra thing will be enabled! +// default: false +type Manifest struct { + // The id is a globally unique identifier that represents your plugin. Ids are limited + // to 190 characters. Reverse-DNS notation using a name you control is a good option. + // For example, "com.mycompany.myplugin". + Id string `json:"id" yaml:"id"` + + // The name to be displayed for the plugin. + Name string `json:"name,omitempty" yaml:"name,omitempty"` + + // A description of what your plugin is and does. + Description string `json:"description,omitempty" yaml:"description,omitempty"` + + // A version number for your plugin. Semantic versioning is recommended: http://semver.org + Version string `json:"version" yaml:"version"` + + // If your plugin extends the server, you'll need define backend. + Backend *ManifestBackend `json:"backend,omitempty" yaml:"backend,omitempty"` + + // If your plugin extends the web app, you'll need to define webapp. + Webapp *ManifestWebapp `json:"webapp,omitempty" yaml:"webapp,omitempty"` + + // To allow administrators to configure your plugin via the Mattermost system console, you can + // provide your settings schema. + SettingsSchema *PluginSettingsSchema `json:"settings_schema,omitempty" yaml:"settings_schema,omitempty"` +} + +type ManifestBackend struct { + // The path to your executable binary. This should be relative to the root of your bundle and the + // location of the manifest file. + // + // On Windows, this file must have a ".exe" extension. + Executable string `json:"executable" yaml:"executable"` +} + +type ManifestWebapp struct { + // The path to your webapp bundle. This should be relative to the root of your bundle and the + // location of the manifest file. + BundlePath string `json:"bundle_path" yaml:"bundle_path"` +} + +func (m *Manifest) ToJson() string { + b, err := json.Marshal(m) + if err != nil { + return "" + } else { + return string(b) + } +} + +func ManifestListToJson(m []*Manifest) string { + b, err := json.Marshal(m) + if err != nil { + return "" + } else { + return string(b) + } +} + +func ManifestFromJson(data io.Reader) *Manifest { + decoder := json.NewDecoder(data) + var m Manifest + err := decoder.Decode(&m) + if err == nil { + return &m + } else { + return nil + } +} + +func ManifestListFromJson(data io.Reader) []*Manifest { + decoder := json.NewDecoder(data) + var manifests []*Manifest + err := decoder.Decode(&manifests) + if err == nil { + return manifests + } else { + return nil + } +} + +func (m *Manifest) HasClient() bool { + return m.Webapp != nil +} + +func (m *Manifest) ClientManifest() *Manifest { + cm := new(Manifest) + *cm = *m + cm.Name = "" + cm.Description = "" + cm.Backend = nil + return cm +} + +// FindManifest will find and parse the manifest in a given directory. 
+// +// In all cases other than a does-not-exist error, path is set to the path of the manifest file that was +// found. +// +// Manifests are JSON or YAML files named plugin.json, plugin.yaml, or plugin.yml. +func FindManifest(dir string) (manifest *Manifest, path string, err error) { + for _, name := range []string{"plugin.yml", "plugin.yaml"} { + path = filepath.Join(dir, name) + f, ferr := os.Open(path) + if ferr != nil { + if !os.IsNotExist(ferr) { + err = ferr + return + } + continue + } + b, ioerr := ioutil.ReadAll(f) + f.Close() + if ioerr != nil { + err = ioerr + return + } + var parsed Manifest + err = yaml.Unmarshal(b, &parsed) + if err != nil { + return + } + manifest = &parsed + return + } + + path = filepath.Join(dir, "plugin.json") + f, ferr := os.Open(path) + if ferr != nil { + if os.IsNotExist(ferr) { + path = "" + } + err = ferr + return + } + defer f.Close() + var parsed Manifest + err = json.NewDecoder(f).Decode(&parsed) + if err != nil { + return + } + manifest = &parsed + return +} diff --git a/vendor/github.com/mattermost/platform/model/message_export.go b/vendor/github.com/mattermost/platform/model/message_export.go new file mode 100644 index 00000000..b59b114d --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/message_export.go @@ -0,0 +1,18 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +type MessageExport struct { + ChannelId *string + ChannelDisplayName *string + + UserId *string + UserEmail *string + + PostId *string + PostCreateAt *int64 + PostMessage *string + PostType *string + PostFileIds StringArray +} diff --git a/vendor/github.com/mattermost/platform/model/outgoing_webhook.go b/vendor/github.com/mattermost/platform/model/outgoing_webhook.go index 59408c24..477a277d 100644 --- a/vendor/github.com/mattermost/platform/model/outgoing_webhook.go +++ b/vendor/github.com/mattermost/platform/model/outgoing_webhook.go @@ -45,6 +45,17 @@ type OutgoingWebhookPayload struct { FileIds string `json:"file_ids"` } +type OutgoingWebhookResponse struct { + Text *string `json:"text"` + Username string `json:"username"` + IconURL string `json:"icon_url"` + Props StringInterface `json:"props"` + Type string `json:"type"` + ResponseType string `json:"response_type"` +} + +const OUTGOING_HOOK_RESPONSE_TYPE_COMMENT = "comment" + func (o *OutgoingWebhookPayload) ToJSON() string { b, err := json.Marshal(o) if err != nil { @@ -112,6 +123,26 @@ func OutgoingWebhookListFromJson(data io.Reader) []*OutgoingWebhook { } } +func (o *OutgoingWebhookResponse) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func OutgoingWebhookResponseFromJson(data io.Reader) *OutgoingWebhookResponse { + decoder := json.NewDecoder(data) + var o OutgoingWebhookResponse + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} + func (o *OutgoingWebhook) IsValid() *AppError { if len(o.Id) != 26 { diff --git a/vendor/github.com/mattermost/platform/model/plugin_key_value.go b/vendor/github.com/mattermost/platform/model/plugin_key_value.go new file mode 100644 index 00000000..b7a7731c --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/plugin_key_value.go @@ -0,0 +1,32 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
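FindManifest above looks for plugin.yml, plugin.yaml, or plugin.json and decodes whichever it finds. As a rough illustration of the YAML side, this sketch parses the documented plugin.yaml shape with gopkg.in/yaml.v2 (the package the manifest code itself imports); the local struct mirrors only a small subset of the real Manifest fields:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// manifest is an illustration, not the vendored type.
type manifest struct {
	Id      string `yaml:"id"`
	Name    string `yaml:"name"`
	Backend struct {
		Executable string `yaml:"executable"`
	} `yaml:"backend"`
}

func main() {
	data := []byte("id: com.mycompany.myplugin\nname: My Plugin\nbackend:\n  executable: myplugin\n")
	var m manifest
	if err := yaml.Unmarshal(data, &m); err != nil {
		panic(err)
	}
	fmt.Println(m.Id, m.Backend.Executable) // com.mycompany.myplugin myplugin
}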
+ +package model + +import ( + "net/http" + "unicode/utf8" +) + +const ( + KEY_VALUE_PLUGIN_ID_MAX_RUNES = 190 + KEY_VALUE_KEY_MAX_RUNES = 50 +) + +type PluginKeyValue struct { + PluginId string `json:"plugin_id"` + Key string `json:"key" db:"PKey"` + Value []byte `json:"value" db:"PValue"` +} + +func (kv *PluginKeyValue) IsValid() *AppError { + if len(kv.PluginId) == 0 || utf8.RuneCountInString(kv.PluginId) > KEY_VALUE_PLUGIN_ID_MAX_RUNES { + return NewAppError("PluginKeyValue.IsValid", "model.plugin_key_value.is_valid.plugin_id.app_error", map[string]interface{}{"Max": KEY_VALUE_KEY_MAX_RUNES, "Min": 0}, "key="+kv.Key, http.StatusBadRequest) + } + + if len(kv.Key) == 0 || utf8.RuneCountInString(kv.Key) > KEY_VALUE_KEY_MAX_RUNES { + return NewAppError("PluginKeyValue.IsValid", "model.plugin_key_value.is_valid.key.app_error", map[string]interface{}{"Max": KEY_VALUE_KEY_MAX_RUNES, "Min": 0}, "key="+kv.Key, http.StatusBadRequest) + } + + return nil +} diff --git a/vendor/github.com/mattermost/platform/model/plugins_response.go b/vendor/github.com/mattermost/platform/model/plugins_response.go new file mode 100644 index 00000000..74c89af2 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/plugins_response.go @@ -0,0 +1,39 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type PluginInfo struct { + Manifest + Prepackaged bool `json:"prepackaged"` +} + +type PluginsResponse struct { + Active []*PluginInfo `json:"active"` + Inactive []*PluginInfo `json:"inactive"` +} + +func (m *PluginsResponse) ToJson() string { + b, err := json.Marshal(m) + if err != nil { + return "" + } else { + return string(b) + } +} + +func PluginsResponseFromJson(data io.Reader) *PluginsResponse { + decoder := json.NewDecoder(data) + var m PluginsResponse + err := decoder.Decode(&m) + if err == nil { + return &m + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/post.go b/vendor/github.com/mattermost/platform/model/post.go index 55e6f591..6b282fbf 100644 --- a/vendor/github.com/mattermost/platform/model/post.go +++ b/vendor/github.com/mattermost/platform/model/post.go @@ -6,6 +6,9 @@ package model import ( "encoding/json" "io" + "net/http" + "regexp" + "strings" "unicode/utf8" ) @@ -17,9 +20,13 @@ const ( POST_JOIN_LEAVE = "system_join_leave" // Deprecated, use POST_JOIN_CHANNEL or POST_LEAVE_CHANNEL instead POST_JOIN_CHANNEL = "system_join_channel" POST_LEAVE_CHANNEL = "system_leave_channel" + POST_JOIN_TEAM = "system_join_team" + POST_LEAVE_TEAM = "system_leave_team" POST_ADD_REMOVE = "system_add_remove" // Deprecated, use POST_ADD_TO_CHANNEL or POST_REMOVE_FROM_CHANNEL instead POST_ADD_TO_CHANNEL = "system_add_to_channel" POST_REMOVE_FROM_CHANNEL = "system_remove_from_channel" + POST_ADD_TO_TEAM = "system_add_to_team" + POST_REMOVE_FROM_TEAM = "system_remove_from_team" POST_HEADER_CHANGE = "system_header_change" POST_DISPLAYNAME_CHANGE = "system_displayname_change" POST_PURPOSE_CHANGE = "system_purpose_change" @@ -30,6 +37,9 @@ const ( POST_HASHTAGS_MAX_RUNES = 1000 POST_MESSAGE_MAX_RUNES = 4000 POST_PROPS_MAX_RUNES = 8000 + POST_PROPS_MAX_USER_RUNES = POST_PROPS_MAX_RUNES - 400 // Leave some room for system / pre-save modifications + POST_CUSTOM_TYPE_PREFIX = "custom_" + PROPS_ADD_CHANNEL_MEMBER = "add_channel_member" ) type Post struct { @@ -68,8 +78,31 @@ type PostForIndexing struct { ParentCreateAt *int64 `json:"parent_create_at"` } +type 
PostAction struct { + Id string `json:"id"` + Name string `json:"name"` + Integration *PostActionIntegration `json:"integration,omitempty"` +} + +type PostActionIntegration struct { + URL string `json:"url,omitempty"` + Context StringInterface `json:"context,omitempty"` +} + +type PostActionIntegrationRequest struct { + UserId string `json:"user_id"` + Context StringInterface `json:"context,omitempty"` +} + +type PostActionIntegrationResponse struct { + Update *Post `json:"update"` + EphemeralText string `json:"ephemeral_text"` +} + func (o *Post) ToJson() string { - b, err := json.Marshal(o) + copy := *o + copy.StripActionIntegrations() + b, err := json.Marshal(&copy) if err != nil { return "" } else { @@ -95,73 +128,100 @@ func (o *Post) Etag() string { func (o *Post) IsValid() *AppError { if len(o.Id) != 26 { - return NewLocAppError("Post.IsValid", "model.post.is_valid.id.app_error", nil, "") + return NewAppError("Post.IsValid", "model.post.is_valid.id.app_error", nil, "", http.StatusBadRequest) } if o.CreateAt == 0 { - return NewLocAppError("Post.IsValid", "model.post.is_valid.create_at.app_error", nil, "id="+o.Id) + return NewAppError("Post.IsValid", "model.post.is_valid.create_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) } if o.UpdateAt == 0 { - return NewLocAppError("Post.IsValid", "model.post.is_valid.update_at.app_error", nil, "id="+o.Id) + return NewAppError("Post.IsValid", "model.post.is_valid.update_at.app_error", nil, "id="+o.Id, http.StatusBadRequest) } if len(o.UserId) != 26 { - return NewLocAppError("Post.IsValid", "model.post.is_valid.user_id.app_error", nil, "") + return NewAppError("Post.IsValid", "model.post.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) } if len(o.ChannelId) != 26 { - return NewLocAppError("Post.IsValid", "model.post.is_valid.channel_id.app_error", nil, "") + return NewAppError("Post.IsValid", "model.post.is_valid.channel_id.app_error", nil, "", http.StatusBadRequest) } if !(len(o.RootId) == 26 || len(o.RootId) == 0) { - return NewLocAppError("Post.IsValid", "model.post.is_valid.root_id.app_error", nil, "") + return NewAppError("Post.IsValid", "model.post.is_valid.root_id.app_error", nil, "", http.StatusBadRequest) } if !(len(o.ParentId) == 26 || len(o.ParentId) == 0) { - return NewLocAppError("Post.IsValid", "model.post.is_valid.parent_id.app_error", nil, "") + return NewAppError("Post.IsValid", "model.post.is_valid.parent_id.app_error", nil, "", http.StatusBadRequest) } if len(o.ParentId) == 26 && len(o.RootId) == 0 { - return NewLocAppError("Post.IsValid", "model.post.is_valid.root_parent.app_error", nil, "") + return NewAppError("Post.IsValid", "model.post.is_valid.root_parent.app_error", nil, "", http.StatusBadRequest) } if !(len(o.OriginalId) == 26 || len(o.OriginalId) == 0) { - return NewLocAppError("Post.IsValid", "model.post.is_valid.original_id.app_error", nil, "") + return NewAppError("Post.IsValid", "model.post.is_valid.original_id.app_error", nil, "", http.StatusBadRequest) } if utf8.RuneCountInString(o.Message) > POST_MESSAGE_MAX_RUNES { - return NewLocAppError("Post.IsValid", "model.post.is_valid.msg.app_error", nil, "id="+o.Id) + return NewAppError("Post.IsValid", "model.post.is_valid.msg.app_error", nil, "id="+o.Id, http.StatusBadRequest) } if utf8.RuneCountInString(o.Hashtags) > POST_HASHTAGS_MAX_RUNES { - return NewLocAppError("Post.IsValid", "model.post.is_valid.hashtags.app_error", nil, "id="+o.Id) + return NewAppError("Post.IsValid", "model.post.is_valid.hashtags.app_error", nil, "id="+o.Id,
http.StatusBadRequest) } - // should be removed once more message types are supported - if !(o.Type == POST_DEFAULT || o.Type == POST_JOIN_LEAVE || o.Type == POST_ADD_REMOVE || - o.Type == POST_JOIN_CHANNEL || o.Type == POST_LEAVE_CHANNEL || - o.Type == POST_REMOVE_FROM_CHANNEL || o.Type == POST_ADD_TO_CHANNEL || - o.Type == POST_SLACK_ATTACHMENT || o.Type == POST_HEADER_CHANGE || o.Type == POST_PURPOSE_CHANGE || - o.Type == POST_DISPLAYNAME_CHANGE || o.Type == POST_CHANNEL_DELETED) { - return NewLocAppError("Post.IsValid", "model.post.is_valid.type.app_error", nil, "id="+o.Type) + switch o.Type { + case + POST_DEFAULT, + POST_JOIN_LEAVE, + POST_ADD_REMOVE, + POST_JOIN_CHANNEL, + POST_LEAVE_CHANNEL, + POST_JOIN_TEAM, + POST_LEAVE_TEAM, + POST_ADD_TO_CHANNEL, + POST_REMOVE_FROM_CHANNEL, + POST_ADD_TO_TEAM, + POST_REMOVE_FROM_TEAM, + POST_SLACK_ATTACHMENT, + POST_HEADER_CHANGE, + POST_PURPOSE_CHANGE, + POST_DISPLAYNAME_CHANGE, + POST_CHANNEL_DELETED: + default: + if !strings.HasPrefix(o.Type, POST_CUSTOM_TYPE_PREFIX) { + return NewAppError("Post.IsValid", "model.post.is_valid.type.app_error", nil, "id="+o.Type, http.StatusBadRequest) + } } if utf8.RuneCountInString(ArrayToJson(o.Filenames)) > POST_FILENAMES_MAX_RUNES { - return NewLocAppError("Post.IsValid", "model.post.is_valid.filenames.app_error", nil, "id="+o.Id) + return NewAppError("Post.IsValid", "model.post.is_valid.filenames.app_error", nil, "id="+o.Id, http.StatusBadRequest) } if utf8.RuneCountInString(ArrayToJson(o.FileIds)) > POST_FILEIDS_MAX_RUNES { - return NewLocAppError("Post.IsValid", "model.post.is_valid.file_ids.app_error", nil, "id="+o.Id) + return NewAppError("Post.IsValid", "model.post.is_valid.file_ids.app_error", nil, "id="+o.Id, http.StatusBadRequest) } if utf8.RuneCountInString(StringInterfaceToJson(o.Props)) > POST_PROPS_MAX_RUNES { - return NewLocAppError("Post.IsValid", "model.post.is_valid.props.app_error", nil, "id="+o.Id) + return NewAppError("Post.IsValid", "model.post.is_valid.props.app_error", nil, "id="+o.Id, http.StatusBadRequest) } return nil } +func (o *Post) SanitizeProps() { + membersToSanitize := []string{ + PROPS_ADD_CHANNEL_MEMBER, + } + + for _, member := range membersToSanitize { + if _, ok := o.Props[member]; ok { + delete(o.Props, member) + } + } +} + func (o *Post) PreSave() { if o.Id == "" { o.Id = NewId() @@ -174,7 +234,10 @@ func (o *Post) PreSave() { } o.UpdateAt = o.CreateAt + o.PreCommit() +} +func (o *Post) PreCommit() { if o.Props == nil { o.Props = make(map[string]interface{}) } @@ -186,6 +249,8 @@ func (o *Post) PreSave() { if o.FileIds == nil { o.FileIds = []string{} } + + o.GenerateActionIds() } func (o *Post) MakeNonNil() { @@ -246,3 +311,84 @@ func PostPatchFromJson(data io.Reader) *PostPatch { return &post } + +var channelMentionRegexp = regexp.MustCompile(`\B~[a-zA-Z0-9\-_]+`) + +func (o *Post) ChannelMentions() (names []string) { + if strings.Contains(o.Message, "~") { + alreadyMentioned := make(map[string]bool) + for _, match := range channelMentionRegexp.FindAllString(o.Message, -1) { + name := match[1:] + if !alreadyMentioned[name] { + names = append(names, name) + alreadyMentioned[name] = true + } + } + } + return +} + +func (r *PostActionIntegrationRequest) ToJson() string { + b, err := json.Marshal(r) + if err != nil { + return "" + } else { + return string(b) + } +} + +func (o *Post) Attachments() []*SlackAttachment { + if attachments, ok := o.Props["attachments"].([]*SlackAttachment); ok { + return attachments + } + var ret []*SlackAttachment + if attachments, ok := 
o.Props["attachments"].([]interface{}); ok { + for _, attachment := range attachments { + if enc, err := json.Marshal(attachment); err == nil { + var decoded SlackAttachment + if json.Unmarshal(enc, &decoded) == nil { + ret = append(ret, &decoded) + } + } + } + } + return ret +} + +func (o *Post) StripActionIntegrations() { + attachments := o.Attachments() + if o.Props["attachments"] != nil { + o.Props["attachments"] = attachments + } + for _, attachment := range attachments { + for _, action := range attachment.Actions { + action.Integration = nil + } + } +} + +func (o *Post) GetAction(id string) *PostAction { + for _, attachment := range o.Attachments() { + for _, action := range attachment.Actions { + if action.Id == id { + return action + } + } + } + return nil +} + +func (o *Post) GenerateActionIds() { + if o.Props["attachments"] != nil { + o.Props["attachments"] = o.Attachments() + } + if attachments, ok := o.Props["attachments"].([]*SlackAttachment); ok { + for _, attachment := range attachments { + for _, action := range attachment.Actions { + if action.Id == "" { + action.Id = NewId() + } + } + } + } +} diff --git a/vendor/github.com/mattermost/platform/model/post_list.go b/vendor/github.com/mattermost/platform/model/post_list.go index 63f6d682..018f7d14 100644 --- a/vendor/github.com/mattermost/platform/model/post_list.go +++ b/vendor/github.com/mattermost/platform/model/post_list.go @@ -6,6 +6,7 @@ package model import ( "encoding/json" "io" + "sort" ) type PostList struct { @@ -20,8 +21,20 @@ func NewPostList() *PostList { } } +func (o *PostList) StripActionIntegrations() { + posts := o.Posts + o.Posts = make(map[string]*Post) + for id, post := range posts { + pcopy := *post + pcopy.StripActionIntegrations() + o.Posts[id] = &pcopy + } +} + func (o *PostList) ToJson() string { - b, err := json.Marshal(o) + copy := *o + copy.StripActionIntegrations() + b, err := json.Marshal(&copy) if err != nil { return "" } else { @@ -70,6 +83,12 @@ func (o *PostList) Extend(other *PostList) { } } +func (o *PostList) SortByCreateAt() { + sort.Slice(o.Order, func(i, j int) bool { + return o.Posts[o.Order[i]].CreateAt > o.Posts[o.Order[j]].CreateAt + }) +} + func (o *PostList) Etag() string { id := "0" diff --git a/vendor/github.com/mattermost/platform/model/preference.go b/vendor/github.com/mattermost/platform/model/preference.go index 6bbe7326..5d462de8 100644 --- a/vendor/github.com/mattermost/platform/model/preference.go +++ b/vendor/github.com/mattermost/platform/model/preference.go @@ -6,6 +6,7 @@ package model import ( "encoding/json" "io" + "net/http" "regexp" "strings" "unicode/utf8" @@ -67,25 +68,25 @@ func PreferenceFromJson(data io.Reader) *Preference { func (o *Preference) IsValid() *AppError { if len(o.UserId) != 26 { - return NewLocAppError("Preference.IsValid", "model.preference.is_valid.id.app_error", nil, "user_id="+o.UserId) + return NewAppError("Preference.IsValid", "model.preference.is_valid.id.app_error", nil, "user_id="+o.UserId, http.StatusBadRequest) } if len(o.Category) == 0 || len(o.Category) > 32 { - return NewLocAppError("Preference.IsValid", "model.preference.is_valid.category.app_error", nil, "category="+o.Category) + return NewAppError("Preference.IsValid", "model.preference.is_valid.category.app_error", nil, "category="+o.Category, http.StatusBadRequest) } if len(o.Name) > 32 { - return NewLocAppError("Preference.IsValid",
"model.preference.is_valid.name.app_error", nil, "name="+o.Name, http.StatusBadRequest) } if utf8.RuneCountInString(o.Value) > 2000 { - return NewLocAppError("Preference.IsValid", "model.preference.is_valid.value.app_error", nil, "value="+o.Value) + return NewAppError("Preference.IsValid", "model.preference.is_valid.value.app_error", nil, "value="+o.Value, http.StatusBadRequest) } if o.Category == PREFERENCE_CATEGORY_THEME { var unused map[string]string if err := json.NewDecoder(strings.NewReader(o.Value)).Decode(&unused); err != nil { - return NewLocAppError("Preference.IsValid", "model.preference.is_valid.theme.app_error", nil, "value="+o.Value) + return NewAppError("Preference.IsValid", "model.preference.is_valid.theme.app_error", nil, "value="+o.Value, http.StatusBadRequest) } } diff --git a/vendor/github.com/mattermost/platform/model/push_notification.go b/vendor/github.com/mattermost/platform/model/push_notification.go index 654d1d9a..69719e74 100644 --- a/vendor/github.com/mattermost/platform/model/push_notification.go +++ b/vendor/github.com/mattermost/platform/model/push_notification.go @@ -18,7 +18,9 @@ const ( PUSH_TYPE_MESSAGE = "message" PUSH_TYPE_CLEAR = "clear" - CATEGORY_DM = "DIRECT_MESSAGE" + // The category is set to handle a set of interactive Actions + // with the push notifications + CATEGORY_CAN_REPLY = "CAN_REPLY" MHPNS = "https://push.mattermost.com" ) @@ -34,6 +36,8 @@ type PushNotification struct { ContentAvailable int `json:"cont_ava"` TeamId string `json:"team_id"` ChannelId string `json:"channel_id"` + PostId string `json:"post_id"` + RootId string `json:"root_id"` ChannelName string `json:"channel_name"` Type string `json:"type"` SenderId string `json:"sender_id"` diff --git a/vendor/github.com/mattermost/platform/model/reaction.go b/vendor/github.com/mattermost/platform/model/reaction.go index 3d334c21..4b72dd44 100644 --- a/vendor/github.com/mattermost/platform/model/reaction.go +++ b/vendor/github.com/mattermost/platform/model/reaction.go @@ -6,6 +6,7 @@ package model import ( "encoding/json" "io" + "net/http" "regexp" ) @@ -54,21 +55,21 @@ func ReactionsFromJson(data io.Reader) []*Reaction { func (o *Reaction) IsValid() *AppError { if len(o.UserId) != 26 { - return NewLocAppError("Reaction.IsValid", "model.reaction.is_valid.user_id.app_error", nil, "user_id="+o.UserId) + return NewAppError("Reaction.IsValid", "model.reaction.is_valid.user_id.app_error", nil, "user_id="+o.UserId, http.StatusBadRequest) } if len(o.PostId) != 26 { - return NewLocAppError("Reaction.IsValid", "model.reaction.is_valid.post_id.app_error", nil, "post_id="+o.PostId) + return NewAppError("Reaction.IsValid", "model.reaction.is_valid.post_id.app_error", nil, "post_id="+o.PostId, http.StatusBadRequest) } validName := regexp.MustCompile(`^[a-zA-Z0-9\-\+_]+$`) if len(o.EmojiName) == 0 || len(o.EmojiName) > 64 || !validName.MatchString(o.EmojiName) { - return NewLocAppError("Reaction.IsValid", "model.reaction.is_valid.emoji_name.app_error", nil, "emoji_name="+o.EmojiName) + return NewAppError("Reaction.IsValid", "model.reaction.is_valid.emoji_name.app_error", nil, "emoji_name="+o.EmojiName, http.StatusBadRequest) } if o.CreateAt == 0 { - return NewLocAppError("Reaction.IsValid", "model.reaction.is_valid.create_at.app_error", nil, "") + return NewAppError("Reaction.IsValid", "model.reaction.is_valid.create_at.app_error", nil, "", http.StatusBadRequest) } return nil diff --git a/vendor/github.com/mattermost/platform/model/search_params.go 
b/vendor/github.com/mattermost/platform/model/search_params.go index 070ac6d2..2feea8da 100644 --- a/vendor/github.com/mattermost/platform/model/search_params.go +++ b/vendor/github.com/mattermost/platform/model/search_params.go @@ -31,16 +31,6 @@ func (o *SearchParams) ToJson() string { var searchFlags = [...]string{"from", "channel", "in"} -func splitWordsNoQuotes(text string) []string { - words := []string{} - - for _, word := range strings.Fields(text) { - words = append(words, word) - } - - return words -} - func splitWords(text string) []string { words := []string{} @@ -55,14 +45,14 @@ func splitWords(text string) []string { foundQuote = false location = i + 1 } else { - words = append(words, splitWordsNoQuotes(text[location:i])...) + words = append(words, strings.Fields(text[location:i])...) foundQuote = true location = i } } } - words = append(words, splitWordsNoQuotes(text[location:])...) + words = append(words, strings.Fields(text[location:])...) return words } diff --git a/vendor/github.com/mattermost/platform/model/session.go b/vendor/github.com/mattermost/platform/model/session.go index 960c18cb..704af067 100644 --- a/vendor/github.com/mattermost/platform/model/session.go +++ b/vendor/github.com/mattermost/platform/model/session.go @@ -37,6 +37,11 @@ type Session struct { TeamMembers []*TeamMember `json:"team_members" db:"-"` } +func (me *Session) DeepCopy() *Session { + copy := *me + return &copy +} + func (me *Session) ToJson() string { b, err := json.Marshal(me) if err != nil { diff --git a/vendor/github.com/mattermost/platform/model/slack_attachment.go b/vendor/github.com/mattermost/platform/model/slack_attachment.go index 85583821..197d3f0f 100644 --- a/vendor/github.com/mattermost/platform/model/slack_attachment.go +++ b/vendor/github.com/mattermost/platform/model/slack_attachment.go @@ -5,7 +5,6 @@ package model import ( "fmt" - "strings" ) type SlackAttachment struct { @@ -25,6 +24,7 @@ type SlackAttachment struct { Footer string `json:"footer"` FooterIcon string `json:"footer_icon"` Timestamp interface{} `json:"ts"` // This is either a string or an int64 + Actions []*PostAction `json:"actions,omitempty"` } type SlackAttachmentField struct { @@ -33,23 +33,7 @@ type SlackAttachmentField struct { Short bool `json:"short"` } -// To mention @channel via a webhook in Slack, the message should contain -// <!channel>, as explained at the bottom of this article: -// https://get.slack.help/hc/en-us/articles/202009646-Making-announcements -func ExpandAnnouncement(text string) string { - c1 := "<!channel>" - c2 := "@channel" - if strings.Contains(text, c1) { - return strings.Replace(text, c1, c2, -1) - } - return text -} - -// Expand announcements in incoming webhooks from Slack. Those announcements -// can be found in the text attribute, or in the pretext, text, title and value -// attributes of the attachment structure. 
The Slack attachment structure is -// documented here: https://api.slack.com/docs/attachments -func ProcessSlackAttachments(a []*SlackAttachment) []*SlackAttachment { +func StringifySlackFieldValue(a []*SlackAttachment) []*SlackAttachment { var nonNilAttachments []*SlackAttachment for _, attachment := range a { if attachment == nil { @@ -57,10 +41,6 @@ func ProcessSlackAttachments(a []*SlackAttachment) []*SlackAttachment { } nonNilAttachments = append(nonNilAttachments, attachment) - attachment.Pretext = ExpandAnnouncement(attachment.Pretext) - attachment.Text = ExpandAnnouncement(attachment.Text) - attachment.Title = ExpandAnnouncement(attachment.Title) - var nonNilFields []*SlackAttachmentField for _, field := range attachment.Fields { if field == nil { @@ -70,7 +50,7 @@ func ProcessSlackAttachments(a []*SlackAttachment) []*SlackAttachment { if field.Value != nil { // Ensure the value is set to a string if it is set - field.Value = ExpandAnnouncement(fmt.Sprintf("%v", field.Value)) + field.Value = fmt.Sprintf("%v", field.Value) } } attachment.Fields = nonNilFields diff --git a/vendor/github.com/mattermost/platform/model/status.go b/vendor/github.com/mattermost/platform/model/status.go index d838eea0..6da6161e 100644 --- a/vendor/github.com/mattermost/platform/model/status.go +++ b/vendor/github.com/mattermost/platform/model/status.go @@ -11,6 +11,7 @@ import ( const ( STATUS_OFFLINE = "offline" STATUS_AWAY = "away" + STATUS_DND = "dnd" STATUS_ONLINE = "online" STATUS_CACHE_SIZE = SESSION_CACHE_SIZE STATUS_CHANNEL_TIMEOUT = 20000 // 20 seconds diff --git a/vendor/github.com/mattermost/platform/model/team_member.go b/vendor/github.com/mattermost/platform/model/team_member.go index 3a0befa3..6c70b75e 100644 --- a/vendor/github.com/mattermost/platform/model/team_member.go +++ b/vendor/github.com/mattermost/platform/model/team_member.go @@ -6,6 +6,7 @@ package model import ( "encoding/json" "io" + "net/http" "strings" ) @@ -103,11 +104,11 @@ func TeamsUnreadFromJson(data io.Reader) []*TeamUnread { func (o *TeamMember) IsValid() *AppError { if len(o.TeamId) != 26 { - return NewLocAppError("TeamMember.IsValid", "model.team_member.is_valid.team_id.app_error", nil, "") + return NewAppError("TeamMember.IsValid", "model.team_member.is_valid.team_id.app_error", nil, "", http.StatusBadRequest) } if len(o.UserId) != 26 { - return NewLocAppError("TeamMember.IsValid", "model.team_member.is_valid.user_id.app_error", nil, "") + return NewAppError("TeamMember.IsValid", "model.team_member.is_valid.user_id.app_error", nil, "", http.StatusBadRequest) } return nil diff --git a/vendor/github.com/mattermost/platform/model/user.go b/vendor/github.com/mattermost/platform/model/user.go index ab4b21e2..7e767fd5 100644 --- a/vendor/github.com/mattermost/platform/model/user.go +++ b/vendor/github.com/mattermost/platform/model/user.go @@ -88,6 +88,12 @@ type UserPatch struct { Locale *string `json:"locale"` } +type UserAuth struct { + Password string `json:"password,omitempty"` + AuthData *string `json:"auth_data,omitempty"` + AuthService string `json:"auth_service,omitempty"` +} + // IsValid validates the user and returns an error if it isn't configured // correctly. 
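Several hunks above (post.go and slack_attachment.go) introduce interactive message actions: SlackAttachment gains an Actions field, and Post gains Attachments, GetAction, StripActionIntegrations and GenerateActionIds, the last of which is wired into PreSave through the new PreCommit. The following is a minimal usage sketch, not part of the diff, assuming only the vendored model package shown in these hunks:

```go
package main

import (
	"fmt"

	"github.com/mattermost/platform/model"
)

func main() {
	// Attachments carrying actions live under Props["attachments"], which is
	// exactly where Post.Attachments() and Post.GenerateActionIds() look.
	post := &model.Post{
		Message: "choose an option",
		Props: map[string]interface{}{
			"attachments": []*model.SlackAttachment{
				{
					Text:    "interactive attachment",
					Actions: []*model.PostAction{{}}, // action fields other than Id omitted here
				},
			},
		},
	}

	// PreSave now calls PreCommit, which calls GenerateActionIds and assigns
	// a NewId() to every action whose Id is still empty.
	post.PreSave()

	id := post.Attachments()[0].Actions[0].Id
	fmt.Println(len(id), post.GetAction(id) != nil) // 26 true
}
```

StripActionIntegrations, together with the copying PostList.ToJson above, presumably exists so that each action's Integration details are dropped before posts are serialized for clients.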
func (u *User) IsValid() *AppError { @@ -228,16 +234,13 @@ func (u *User) SetDefaultNotifications() { u.NotifyProps = make(map[string]string) u.NotifyProps["email"] = "true" u.NotifyProps["push"] = USER_NOTIFY_MENTION - u.NotifyProps["desktop"] = USER_NOTIFY_ALL + u.NotifyProps["desktop"] = USER_NOTIFY_MENTION u.NotifyProps["desktop_sound"] = "true" u.NotifyProps["mention_keys"] = u.Username + ",@" + u.Username u.NotifyProps["channel"] = "true" - - if u.FirstName == "" { - u.NotifyProps["first_name"] = "false" - } else { - u.NotifyProps["first_name"] = "true" - } + u.NotifyProps["push_status"] = STATUS_AWAY + u.NotifyProps["comments"] = "never" + u.NotifyProps["first_name"] = "false" } func (user *User) UpdateMentionKeysFromUsername(oldUsername string) { @@ -312,6 +315,15 @@ func (u *UserPatch) ToJson() string { } } +func (u *UserAuth) ToJson() string { + b, err := json.Marshal(u) + if err != nil { + return "" + } else { + return string(b) + } +} + // Generate a valid strong etag so the browser can cache the results func (u *User) Etag(showFullName, showEmail bool) string { return Etag(u.Id, u.UpdateAt, showFullName, showEmail) @@ -320,8 +332,7 @@ func (u *User) Etag(showFullName, showEmail bool) string { // Remove any private data from the user object func (u *User) Sanitize(options map[string]bool) { u.Password = "" - u.AuthData = new(string) - *u.AuthData = "" + u.AuthData = NewString("") u.MfaSecret = "" if len(options) != 0 && !options["email"] { @@ -341,12 +352,10 @@ func (u *User) Sanitize(options map[string]bool) { func (u *User) ClearNonProfileFields() { u.Password = "" - u.AuthData = new(string) - *u.AuthData = "" + u.AuthData = NewString("") u.MfaSecret = "" u.EmailVerified = false u.AllowMarketing = false - u.Props = StringMap{} u.NotifyProps = StringMap{} u.LastPasswordUpdate = 0 u.FailedAttempts = 0 @@ -437,7 +446,7 @@ func IsValidUserRoles(userRoles string) bool { } func isValidRole(roleId string) bool { - _, ok := BuiltInRoles[roleId] + _, ok := DefaultRoles[roleId] return ok } @@ -500,6 +509,17 @@ func UserPatchFromJson(data io.Reader) *UserPatch { } } +func UserAuthFromJson(data io.Reader) *UserAuth { + decoder := json.NewDecoder(data) + var user UserAuth + err := decoder.Decode(&user) + if err == nil { + return &user + } else { + return nil + } +} + func UserMapToJson(u map[string]*User) string { b, err := json.Marshal(u) if err != nil { diff --git a/vendor/github.com/mattermost/platform/model/user_access_token.go b/vendor/github.com/mattermost/platform/model/user_access_token.go index 090780fd..e189ec23 100644 --- a/vendor/github.com/mattermost/platform/model/user_access_token.go +++ b/vendor/github.com/mattermost/platform/model/user_access_token.go @@ -14,6 +14,7 @@ type UserAccessToken struct { Token string `json:"token,omitempty"` UserId string `json:"user_id"` Description string `json:"description"` + IsActive bool `json:"is_active"` } func (t *UserAccessToken) IsValid() *AppError { @@ -38,6 +39,7 @@ func (t *UserAccessToken) IsValid() *AppError { func (t *UserAccessToken) PreSave() { t.Id = NewId() + t.IsActive = true } func (t *UserAccessToken) ToJson() string { diff --git a/vendor/github.com/mattermost/platform/model/utils.go b/vendor/github.com/mattermost/platform/model/utils.go index 090644ec..e84d44f7 100644 --- a/vendor/github.com/mattermost/platform/model/utils.go +++ b/vendor/github.com/mattermost/platform/model/utils.go @@ -12,12 +12,14 @@ import ( "io" "io/ioutil" "net" + "net/http" "net/mail" "net/url" "regexp" "strconv" "strings" "time" + "unicode" 
goi18n "github.com/nicksnyder/go-i18n/i18n" "github.com/pborman/uuid" @@ -90,7 +92,7 @@ func AppErrorFromJson(data io.Reader) *AppError { if err == nil { return &er } else { - return NewLocAppError("AppErrorFromJson", "model.utils.decode_json.app_error", nil, "body: "+str) + return NewAppError("AppErrorFromJson", "model.utils.decode_json.app_error", nil, "body: "+str, http.StatusInternalServerError) } } @@ -106,18 +108,6 @@ func NewAppError(where string, id string, params map[string]interface{}, details return ap } -func NewLocAppError(where string, id string, params map[string]interface{}, details string) *AppError { - ap := &AppError{} - ap.Id = id - ap.params = params - ap.Message = id - ap.Where = where - ap.DetailedError = details - ap.StatusCode = 500 - ap.IsOAuth = false - return ap -} - var encoding = base32.NewEncoding("ybndrfg8ejkmcpqxot1uwisza345h769") // NewId is a globally unique identifier. It is a [A-Z0-9] string 26 @@ -283,11 +273,7 @@ func GetServerIpAddress() string { } func IsLower(s string) bool { - if strings.ToLower(s) == s { - return true - } - - return false + return strings.ToLower(s) == s } func IsValidEmail(email string) bool { @@ -492,3 +478,17 @@ func IsValidNumberString(value string) bool { return true } + +func IsValidId(value string) bool { + if len(value) != 26 { + return false + } + + for _, r := range value { + if !unicode.IsLetter(r) && !unicode.IsNumber(r) { + return false + } + } + + return true +} diff --git a/vendor/github.com/mattermost/platform/model/version.go b/vendor/github.com/mattermost/platform/model/version.go index b08af46b..430924ee 100644 --- a/vendor/github.com/mattermost/platform/model/version.go +++ b/vendor/github.com/mattermost/platform/model/version.go @@ -13,6 +13,11 @@ import ( // It should be maitained in chronological order with most current // release at the front of the list. 
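The utils.go hunk above removes NewLocAppError in favour of the status-aware NewAppError and adds an IsValidId helper. A small sketch of what the new validator accepts, again assuming only the exported helpers visible in that hunk:

```go
package main

import (
	"fmt"

	"github.com/mattermost/platform/model"
)

func main() {
	// IsValidId requires a 26-character string made up of letters and digits,
	// which matches the 26-character IDs produced by NewId().
	fmt.Println(model.IsValidId(model.NewId()))                // true
	fmt.Println(model.IsValidId("too-short"))                  // false: wrong length
	fmt.Println(model.IsValidId("abcdefghijklmnopqrstuvwxy!")) // false: '!' is neither letter nor digit
}
```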
var versions = []string{ + "4.6.0", + "4.5.0", + "4.4.0", + "4.3.0", + "4.2.0", "4.1.0", "4.0.0", "3.10.0", diff --git a/vendor/github.com/mattermost/platform/model/websocket_client.go b/vendor/github.com/mattermost/platform/model/websocket_client.go index 2fa405f3..e5c44dde 100644 --- a/vendor/github.com/mattermost/platform/model/websocket_client.go +++ b/vendor/github.com/mattermost/platform/model/websocket_client.go @@ -5,6 +5,8 @@ package model import ( "encoding/json" + "net/http" + "github.com/gorilla/websocket" ) @@ -29,7 +31,7 @@ type WebSocketClient struct { func NewWebSocketClient(url, authToken string) (*WebSocketClient, *AppError) { conn, _, err := websocket.DefaultDialer.Dial(url+API_URL_SUFFIX_V3+"/users/websocket", nil) if err != nil { - return nil, NewLocAppError("NewWebSocketClient", "model.websocket_client.connect_fail.app_error", nil, err.Error()) + return nil, NewAppError("NewWebSocketClient", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError) } client := &WebSocketClient{ @@ -54,7 +56,7 @@ func NewWebSocketClient(url, authToken string) (*WebSocketClient, *AppError) { func NewWebSocketClient4(url, authToken string) (*WebSocketClient, *AppError) { conn, _, err := websocket.DefaultDialer.Dial(url+API_URL_SUFFIX+"/websocket", nil) if err != nil { - return nil, NewLocAppError("NewWebSocketClient4", "model.websocket_client.connect_fail.app_error", nil, err.Error()) + return nil, NewAppError("NewWebSocketClient4", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError) } client := &WebSocketClient{ @@ -78,7 +80,7 @@ func (wsc *WebSocketClient) Connect() *AppError { var err error wsc.Conn, _, err = websocket.DefaultDialer.Dial(wsc.ConnectUrl, nil) if err != nil { - return NewLocAppError("Connect", "model.websocket_client.connect_fail.app_error", nil, err.Error()) + return NewAppError("Connect", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError) } wsc.EventChannel = make(chan *WebSocketEvent, 100) @@ -106,7 +108,7 @@ func (wsc *WebSocketClient) Listen() { var err error if _, rawMsg, err = wsc.Conn.ReadMessage(); err != nil { if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseNoStatusReceived) { - wsc.ListenError = NewLocAppError("NewWebSocketClient", "model.websocket_client.connect_fail.app_error", nil, err.Error()) + wsc.ListenError = NewAppError("NewWebSocketClient", "model.websocket_client.connect_fail.app_error", nil, err.Error(), http.StatusInternalServerError) } return diff --git a/vendor/github.com/mattermost/platform/model/websocket_message.go b/vendor/github.com/mattermost/platform/model/websocket_message.go index 6b8c0342..bf2535dc 100644 --- a/vendor/github.com/mattermost/platform/model/websocket_message.go +++ b/vendor/github.com/mattermost/platform/model/websocket_message.go @@ -24,6 +24,7 @@ const ( WEBSOCKET_EVENT_UPDATE_TEAM = "update_team" WEBSOCKET_EVENT_USER_ADDED = "user_added" WEBSOCKET_EVENT_USER_UPDATED = "user_updated" + WEBSOCKET_EVENT_USER_ROLE_UPDATED = "user_role_updated" WEBSOCKET_EVENT_MEMBERROLE_UPDATED = "memberrole_updated" WEBSOCKET_EVENT_USER_REMOVED = "user_removed" WEBSOCKET_EVENT_PREFERENCE_CHANGED = "preference_changed" @@ -39,6 +40,8 @@ const ( WEBSOCKET_EVENT_RESPONSE = "response" WEBSOCKET_EVENT_EMOJI_ADDED = "emoji_added" WEBSOCKET_EVENT_CHANNEL_VIEWED = "channel_viewed" + WEBSOCKET_EVENT_PLUGIN_ACTIVATED = "plugin_activated" // EXPERIMENTAL - SUBJECT TO CHANGE + 
WEBSOCKET_EVENT_PLUGIN_DEACTIVATED = "plugin_deactivated" // EXPERIMENTAL - SUBJECT TO CHANGE ) type WebSocketMessage interface { diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/client.go deleted file mode 100644 index 43a87c75..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/client.go +++ /dev/null @@ -1,392 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bufio" - "bytes" - "crypto/tls" - "encoding/base64" - "errors" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "strings" - "time" -) - -// ErrBadHandshake is returned when the server response to opening handshake is -// invalid. -var ErrBadHandshake = errors.New("websocket: bad handshake") - -var errInvalidCompression = errors.New("websocket: invalid compression negotiation") - -// NewClient creates a new client connection using the given net connection. -// The URL u specifies the host and request URI. Use requestHeader to specify -// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies -// (Cookie). Use the response.Header to get the selected subprotocol -// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). -// -// If the WebSocket handshake fails, ErrBadHandshake is returned along with a -// non-nil *http.Response so that callers can handle redirects, authentication, -// etc. -// -// Deprecated: Use Dialer instead. -func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { - d := Dialer{ - ReadBufferSize: readBufSize, - WriteBufferSize: writeBufSize, - NetDial: func(net, addr string) (net.Conn, error) { - return netConn, nil - }, - } - return d.Dial(u.String(), requestHeader) -} - -// A Dialer contains options for connecting to WebSocket server. -type Dialer struct { - // NetDial specifies the dial function for creating TCP connections. If - // NetDial is nil, net.Dial is used. - NetDial func(network, addr string) (net.Conn, error) - - // Proxy specifies a function to return a proxy for a given - // Request. If the function returns a non-nil error, the - // request is aborted with the provided error. - // If Proxy is nil or returns a nil *URL, no proxy is used. - Proxy func(*http.Request) (*url.URL, error) - - // TLSClientConfig specifies the TLS configuration to use with tls.Client. - // If nil, the default configuration is used. - TLSClientConfig *tls.Config - - // HandshakeTimeout specifies the duration for the handshake to complete. - HandshakeTimeout time.Duration - - // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer - // size is zero, then a useful default size is used. The I/O buffer sizes - // do not limit the size of the messages that can be sent or received. - ReadBufferSize, WriteBufferSize int - - // Subprotocols specifies the client's requested subprotocols. - Subprotocols []string - - // EnableCompression specifies if the client should attempt to negotiate - // per message compression (RFC 7692). Setting this value to true does not - // guarantee that compression will be supported. Currently only "no context - // takeover" modes are supported. - EnableCompression bool - - // Jar specifies the cookie jar. 
- // If Jar is nil, cookies are not sent in requests and ignored - // in responses. - Jar http.CookieJar -} - -var errMalformedURL = errors.New("malformed ws or wss URL") - -// parseURL parses the URL. -// -// This function is a replacement for the standard library url.Parse function. -// In Go 1.4 and earlier, url.Parse loses information from the path. -func parseURL(s string) (*url.URL, error) { - // From the RFC: - // - // ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ] - // wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ] - var u url.URL - switch { - case strings.HasPrefix(s, "ws://"): - u.Scheme = "ws" - s = s[len("ws://"):] - case strings.HasPrefix(s, "wss://"): - u.Scheme = "wss" - s = s[len("wss://"):] - default: - return nil, errMalformedURL - } - - if i := strings.Index(s, "?"); i >= 0 { - u.RawQuery = s[i+1:] - s = s[:i] - } - - if i := strings.Index(s, "/"); i >= 0 { - u.Opaque = s[i:] - s = s[:i] - } else { - u.Opaque = "/" - } - - u.Host = s - - if strings.Contains(u.Host, "@") { - // Don't bother parsing user information because user information is - // not allowed in websocket URIs. - return nil, errMalformedURL - } - - return &u, nil -} - -func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { - hostPort = u.Host - hostNoPort = u.Host - if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { - hostNoPort = hostNoPort[:i] - } else { - switch u.Scheme { - case "wss": - hostPort += ":443" - case "https": - hostPort += ":443" - default: - hostPort += ":80" - } - } - return hostPort, hostNoPort -} - -// DefaultDialer is a dialer with all fields set to the default zero values. -var DefaultDialer = &Dialer{ - Proxy: http.ProxyFromEnvironment, -} - -// Dial creates a new client connection. Use requestHeader to specify the -// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). -// Use the response.Header to get the selected subprotocol -// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). -// -// If the WebSocket handshake fails, ErrBadHandshake is returned along with a -// non-nil *http.Response so that callers can handle redirects, authentication, -// etcetera. The response body may not contain the entire response and does not -// need to be closed by the application. -func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { - - if d == nil { - d = &Dialer{ - Proxy: http.ProxyFromEnvironment, - } - } - - challengeKey, err := generateChallengeKey() - if err != nil { - return nil, nil, err - } - - u, err := parseURL(urlStr) - if err != nil { - return nil, nil, err - } - - switch u.Scheme { - case "ws": - u.Scheme = "http" - case "wss": - u.Scheme = "https" - default: - return nil, nil, errMalformedURL - } - - if u.User != nil { - // User name and password are not allowed in websocket URIs. - return nil, nil, errMalformedURL - } - - req := &http.Request{ - Method: "GET", - URL: u, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: make(http.Header), - Host: u.Host, - } - - // Set the cookies present in the cookie jar of the dialer - if d.Jar != nil { - for _, cookie := range d.Jar.Cookies(u) { - req.AddCookie(cookie) - } - } - - // Set the request headers using the capitalization for names and values in - // RFC examples. Although the capitalization shouldn't matter, there are - // servers that depend on it. The Header.Set method is not used because the - // method canonicalizes the header names. 
- req.Header["Upgrade"] = []string{"websocket"} - req.Header["Connection"] = []string{"Upgrade"} - req.Header["Sec-WebSocket-Key"] = []string{challengeKey} - req.Header["Sec-WebSocket-Version"] = []string{"13"} - if len(d.Subprotocols) > 0 { - req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} - } - for k, vs := range requestHeader { - switch { - case k == "Host": - if len(vs) > 0 { - req.Host = vs[0] - } - case k == "Upgrade" || - k == "Connection" || - k == "Sec-Websocket-Key" || - k == "Sec-Websocket-Version" || - k == "Sec-Websocket-Extensions" || - (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): - return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) - default: - req.Header[k] = vs - } - } - - if d.EnableCompression { - req.Header.Set("Sec-Websocket-Extensions", "permessage-deflate; server_no_context_takeover; client_no_context_takeover") - } - - hostPort, hostNoPort := hostPortNoPort(u) - - var proxyURL *url.URL - // Check wether the proxy method has been configured - if d.Proxy != nil { - proxyURL, err = d.Proxy(req) - } - if err != nil { - return nil, nil, err - } - - var targetHostPort string - if proxyURL != nil { - targetHostPort, _ = hostPortNoPort(proxyURL) - } else { - targetHostPort = hostPort - } - - var deadline time.Time - if d.HandshakeTimeout != 0 { - deadline = time.Now().Add(d.HandshakeTimeout) - } - - netDial := d.NetDial - if netDial == nil { - netDialer := &net.Dialer{Deadline: deadline} - netDial = netDialer.Dial - } - - netConn, err := netDial("tcp", targetHostPort) - if err != nil { - return nil, nil, err - } - - defer func() { - if netConn != nil { - netConn.Close() - } - }() - - if err := netConn.SetDeadline(deadline); err != nil { - return nil, nil, err - } - - if proxyURL != nil { - connectHeader := make(http.Header) - if user := proxyURL.User; user != nil { - proxyUser := user.Username() - if proxyPassword, passwordSet := user.Password(); passwordSet { - credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) - connectHeader.Set("Proxy-Authorization", "Basic "+credential) - } - } - connectReq := &http.Request{ - Method: "CONNECT", - URL: &url.URL{Opaque: hostPort}, - Host: hostPort, - Header: connectHeader, - } - - connectReq.Write(netConn) - - // Read response. - // Okay to use and discard buffered reader here, because - // TLS server will not speak until spoken to. 
- br := bufio.NewReader(netConn) - resp, err := http.ReadResponse(br, connectReq) - if err != nil { - return nil, nil, err - } - if resp.StatusCode != 200 { - f := strings.SplitN(resp.Status, " ", 2) - return nil, nil, errors.New(f[1]) - } - } - - if u.Scheme == "https" { - cfg := cloneTLSConfig(d.TLSClientConfig) - if cfg.ServerName == "" { - cfg.ServerName = hostNoPort - } - tlsConn := tls.Client(netConn, cfg) - netConn = tlsConn - if err := tlsConn.Handshake(); err != nil { - return nil, nil, err - } - if !cfg.InsecureSkipVerify { - if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { - return nil, nil, err - } - } - } - - conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize) - - if err := req.Write(netConn); err != nil { - return nil, nil, err - } - - resp, err := http.ReadResponse(conn.br, req) - if err != nil { - return nil, nil, err - } - - if d.Jar != nil { - if rc := resp.Cookies(); len(rc) > 0 { - d.Jar.SetCookies(u, rc) - } - } - - if resp.StatusCode != 101 || - !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || - !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || - resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { - // Before closing the network connection on return from this - // function, slurp up some of the response to aid application - // debugging. - buf := make([]byte, 1024) - n, _ := io.ReadFull(resp.Body, buf) - resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) - return nil, resp, ErrBadHandshake - } - - for _, ext := range parseExtensions(resp.Header) { - if ext[""] != "permessage-deflate" { - continue - } - _, snct := ext["server_no_context_takeover"] - _, cnct := ext["client_no_context_takeover"] - if !snct || !cnct { - return nil, resp, errInvalidCompression - } - conn.newCompressionWriter = compressNoContextTakeover - conn.newDecompressionReader = decompressNoContextTakeover - break - } - - resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) - conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") - - netConn.SetDeadline(time.Time{}) - netConn = nil // to avoid close in defer. - return conn, resp, nil -} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/client_clone.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/client_clone.go deleted file mode 100644 index 4f0d9437..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/client_clone.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.8 - -package websocket - -import "crypto/tls" - -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - return cfg.Clone() -} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/client_clone_legacy.go deleted file mode 100644 index babb007f..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/client_clone_legacy.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.8 - -package websocket - -import "crypto/tls" - -// cloneTLSConfig clones all public fields except the fields -// SessionTicketsDisabled and SessionTicketKey. This avoids copying the -// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a -// config in active use. -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - return &tls.Config{ - Rand: cfg.Rand, - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - } -} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/compression.go deleted file mode 100644 index 813ffb1e..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/compression.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "compress/flate" - "errors" - "io" - "strings" - "sync" -) - -const ( - minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 - maxCompressionLevel = flate.BestCompression - defaultCompressionLevel = 1 -) - -var ( - flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool - flateReaderPool = sync.Pool{New: func() interface{} { - return flate.NewReader(nil) - }} -) - -func decompressNoContextTakeover(r io.Reader) io.ReadCloser { - const tail = - // Add four bytes as specified in RFC - "\x00\x00\xff\xff" + - // Add final block to squelch unexpected EOF error from flate reader. - "\x01\x00\x00\xff\xff" - - fr, _ := flateReaderPool.Get().(io.ReadCloser) - fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) - return &flateReadWrapper{fr} -} - -func isValidCompressionLevel(level int) bool { - return minCompressionLevel <= level && level <= maxCompressionLevel -} - -func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { - p := &flateWriterPools[level-minCompressionLevel] - tw := &truncWriter{w: w} - fw, _ := p.Get().(*flate.Writer) - if fw == nil { - fw, _ = flate.NewWriter(tw, level) - } else { - fw.Reset(tw) - } - return &flateWriteWrapper{fw: fw, tw: tw, p: p} -} - -// truncWriter is an io.Writer that writes all but the last four bytes of the -// stream to another io.Writer. -type truncWriter struct { - w io.WriteCloser - n int - p [4]byte -} - -func (w *truncWriter) Write(p []byte) (int, error) { - n := 0 - - // fill buffer first for simplicity. 
- if w.n < len(w.p) { - n = copy(w.p[w.n:], p) - p = p[n:] - w.n += n - if len(p) == 0 { - return n, nil - } - } - - m := len(p) - if m > len(w.p) { - m = len(w.p) - } - - if nn, err := w.w.Write(w.p[:m]); err != nil { - return n + nn, err - } - - copy(w.p[:], w.p[m:]) - copy(w.p[len(w.p)-m:], p[len(p)-m:]) - nn, err := w.w.Write(p[:len(p)-m]) - return n + nn, err -} - -type flateWriteWrapper struct { - fw *flate.Writer - tw *truncWriter - p *sync.Pool -} - -func (w *flateWriteWrapper) Write(p []byte) (int, error) { - if w.fw == nil { - return 0, errWriteClosed - } - return w.fw.Write(p) -} - -func (w *flateWriteWrapper) Close() error { - if w.fw == nil { - return errWriteClosed - } - err1 := w.fw.Flush() - w.p.Put(w.fw) - w.fw = nil - if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { - return errors.New("websocket: internal error, unexpected bytes at end of flate stream") - } - err2 := w.tw.w.Close() - if err1 != nil { - return err1 - } - return err2 -} - -type flateReadWrapper struct { - fr io.ReadCloser -} - -func (r *flateReadWrapper) Read(p []byte) (int, error) { - if r.fr == nil { - return 0, io.ErrClosedPipe - } - n, err := r.fr.Read(p) - if err == io.EOF { - // Preemptively place the reader back in the pool. This helps with - // scenarios where the application does not call NextReader() soon after - // this final read. - r.Close() - } - return n, err -} - -func (r *flateReadWrapper) Close() error { - if r.fr == nil { - return io.ErrClosedPipe - } - err := r.fr.Close() - flateReaderPool.Put(r.fr) - r.fr = nil - return err -} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/conn.go deleted file mode 100644 index 97e1dbac..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/conn.go +++ /dev/null @@ -1,1149 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bufio" - "encoding/binary" - "errors" - "io" - "io/ioutil" - "math/rand" - "net" - "strconv" - "sync" - "time" - "unicode/utf8" -) - -const ( - // Frame header byte 0 bits from Section 5.2 of RFC 6455 - finalBit = 1 << 7 - rsv1Bit = 1 << 6 - rsv2Bit = 1 << 5 - rsv3Bit = 1 << 4 - - // Frame header byte 1 bits from Section 5.2 of RFC 6455 - maskBit = 1 << 7 - - maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask - maxControlFramePayloadSize = 125 - - writeWait = time.Second - - defaultReadBufferSize = 4096 - defaultWriteBufferSize = 4096 - - continuationFrame = 0 - noFrame = -1 -) - -// Close codes defined in RFC 6455, section 11.7. -const ( - CloseNormalClosure = 1000 - CloseGoingAway = 1001 - CloseProtocolError = 1002 - CloseUnsupportedData = 1003 - CloseNoStatusReceived = 1005 - CloseAbnormalClosure = 1006 - CloseInvalidFramePayloadData = 1007 - ClosePolicyViolation = 1008 - CloseMessageTooBig = 1009 - CloseMandatoryExtension = 1010 - CloseInternalServerErr = 1011 - CloseServiceRestart = 1012 - CloseTryAgainLater = 1013 - CloseTLSHandshake = 1015 -) - -// The message types are defined in RFC 6455, section 11.8. -const ( - // TextMessage denotes a text data message. The text message payload is - // interpreted as UTF-8 encoded text data. - TextMessage = 1 - - // BinaryMessage denotes a binary data message. - BinaryMessage = 2 - - // CloseMessage denotes a close control message. 
The optional message - // payload contains a numeric code and text. Use the FormatCloseMessage - // function to format a close message payload. - CloseMessage = 8 - - // PingMessage denotes a ping control message. The optional message payload - // is UTF-8 encoded text. - PingMessage = 9 - - // PongMessage denotes a ping control message. The optional message payload - // is UTF-8 encoded text. - PongMessage = 10 -) - -// ErrCloseSent is returned when the application writes a message to the -// connection after sending a close message. -var ErrCloseSent = errors.New("websocket: close sent") - -// ErrReadLimit is returned when reading a message that is larger than the -// read limit set for the connection. -var ErrReadLimit = errors.New("websocket: read limit exceeded") - -// netError satisfies the net Error interface. -type netError struct { - msg string - temporary bool - timeout bool -} - -func (e *netError) Error() string { return e.msg } -func (e *netError) Temporary() bool { return e.temporary } -func (e *netError) Timeout() bool { return e.timeout } - -// CloseError represents close frame. -type CloseError struct { - - // Code is defined in RFC 6455, section 11.7. - Code int - - // Text is the optional text payload. - Text string -} - -func (e *CloseError) Error() string { - s := []byte("websocket: close ") - s = strconv.AppendInt(s, int64(e.Code), 10) - switch e.Code { - case CloseNormalClosure: - s = append(s, " (normal)"...) - case CloseGoingAway: - s = append(s, " (going away)"...) - case CloseProtocolError: - s = append(s, " (protocol error)"...) - case CloseUnsupportedData: - s = append(s, " (unsupported data)"...) - case CloseNoStatusReceived: - s = append(s, " (no status)"...) - case CloseAbnormalClosure: - s = append(s, " (abnormal closure)"...) - case CloseInvalidFramePayloadData: - s = append(s, " (invalid payload data)"...) - case ClosePolicyViolation: - s = append(s, " (policy violation)"...) - case CloseMessageTooBig: - s = append(s, " (message too big)"...) - case CloseMandatoryExtension: - s = append(s, " (mandatory extension missing)"...) - case CloseInternalServerErr: - s = append(s, " (internal server error)"...) - case CloseTLSHandshake: - s = append(s, " (TLS handshake error)"...) - } - if e.Text != "" { - s = append(s, ": "...) - s = append(s, e.Text...) - } - return string(s) -} - -// IsCloseError returns boolean indicating whether the error is a *CloseError -// with one of the specified codes. -func IsCloseError(err error, codes ...int) bool { - if e, ok := err.(*CloseError); ok { - for _, code := range codes { - if e.Code == code { - return true - } - } - } - return false -} - -// IsUnexpectedCloseError returns boolean indicating whether the error is a -// *CloseError with a code not in the list of expected codes. 
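The file being deleted here is the duplicate copy of gorilla/websocket that was vendored inside the platform package; the same close-code helpers remain available from the top-level github.com/gorilla/websocket vendor added elsewhere in this change. A hedged sketch of the usual read loop built on IsUnexpectedCloseError (the loop itself is illustrative, not taken from either copy):

```go
package main

import (
	"log"

	"github.com/gorilla/websocket"
)

// readLoop drains a connection until it closes, logging only closures that
// are not part of a normal shutdown.
func readLoop(conn *websocket.Conn) {
	defer conn.Close()
	for {
		_, msg, err := conn.ReadMessage()
		if err != nil {
			if websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {
				log.Printf("unexpected close: %v", err)
			}
			return
		}
		log.Printf("recv: %s", msg)
	}
}

func main() {} // readLoop would be called with a dialed or upgraded *websocket.Conn
```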
-func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { - if e, ok := err.(*CloseError); ok { - for _, code := range expectedCodes { - if e.Code == code { - return false - } - } - return true - } - return false -} - -var ( - errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} - errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} - errBadWriteOpCode = errors.New("websocket: bad write message type") - errWriteClosed = errors.New("websocket: write closed") - errInvalidControlFrame = errors.New("websocket: invalid control frame") -) - -func newMaskKey() [4]byte { - n := rand.Uint32() - return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} -} - -func hideTempErr(err error) error { - if e, ok := err.(net.Error); ok && e.Temporary() { - err = &netError{msg: e.Error(), timeout: e.Timeout()} - } - return err -} - -func isControl(frameType int) bool { - return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage -} - -func isData(frameType int) bool { - return frameType == TextMessage || frameType == BinaryMessage -} - -var validReceivedCloseCodes = map[int]bool{ - // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number - - CloseNormalClosure: true, - CloseGoingAway: true, - CloseProtocolError: true, - CloseUnsupportedData: true, - CloseNoStatusReceived: false, - CloseAbnormalClosure: false, - CloseInvalidFramePayloadData: true, - ClosePolicyViolation: true, - CloseMessageTooBig: true, - CloseMandatoryExtension: true, - CloseInternalServerErr: true, - CloseServiceRestart: true, - CloseTryAgainLater: true, - CloseTLSHandshake: false, -} - -func isValidReceivedCloseCode(code int) bool { - return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) -} - -// The Conn type represents a WebSocket connection. -type Conn struct { - conn net.Conn - isServer bool - subprotocol string - - // Write fields - mu chan bool // used as mutex to protect write to conn - writeBuf []byte // frame is constructed in this buffer. - writeDeadline time.Time - writer io.WriteCloser // the current writer returned to the application - isWriting bool // for best-effort concurrent write detection - - writeErrMu sync.Mutex - writeErr error - - enableWriteCompression bool - compressionLevel int - newCompressionWriter func(io.WriteCloser, int) io.WriteCloser - - // Read fields - reader io.ReadCloser // the current reader returned to the application - readErr error - br *bufio.Reader - readRemaining int64 // bytes remaining in current frame. - readFinal bool // true the current message has more frames. - readLength int64 // Message size. - readLimit int64 // Maximum message size. 
- readMaskPos int - readMaskKey [4]byte - handlePong func(string) error - handlePing func(string) error - handleClose func(int, string) error - readErrCount int - messageReader *messageReader // the current low-level reader - - readDecompress bool // whether last read frame had RSV1 set - newDecompressionReader func(io.Reader) io.ReadCloser -} - -func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn { - return newConnBRW(conn, isServer, readBufferSize, writeBufferSize, nil) -} - -type writeHook struct { - p []byte -} - -func (wh *writeHook) Write(p []byte) (int, error) { - wh.p = p - return len(p), nil -} - -func newConnBRW(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, brw *bufio.ReadWriter) *Conn { - mu := make(chan bool, 1) - mu <- true - - var br *bufio.Reader - if readBufferSize == 0 && brw != nil && brw.Reader != nil { - // Reuse the supplied bufio.Reader if the buffer has a useful size. - // This code assumes that peek on a reader returns - // bufio.Reader.buf[:0]. - brw.Reader.Reset(conn) - if p, err := brw.Reader.Peek(0); err == nil && cap(p) >= 256 { - br = brw.Reader - } - } - if br == nil { - if readBufferSize == 0 { - readBufferSize = defaultReadBufferSize - } - if readBufferSize < maxControlFramePayloadSize { - readBufferSize = maxControlFramePayloadSize - } - br = bufio.NewReaderSize(conn, readBufferSize) - } - - var writeBuf []byte - if writeBufferSize == 0 && brw != nil && brw.Writer != nil { - // Use the bufio.Writer's buffer if the buffer has a useful size. This - // code assumes that bufio.Writer.buf[:1] is passed to the - // bufio.Writer's underlying writer. - var wh writeHook - brw.Writer.Reset(&wh) - brw.Writer.WriteByte(0) - brw.Flush() - if cap(wh.p) >= maxFrameHeaderSize+256 { - writeBuf = wh.p[:cap(wh.p)] - } - } - - if writeBuf == nil { - if writeBufferSize == 0 { - writeBufferSize = defaultWriteBufferSize - } - writeBuf = make([]byte, writeBufferSize+maxFrameHeaderSize) - } - - c := &Conn{ - isServer: isServer, - br: br, - conn: conn, - mu: mu, - readFinal: true, - writeBuf: writeBuf, - enableWriteCompression: true, - compressionLevel: defaultCompressionLevel, - } - c.SetCloseHandler(nil) - c.SetPingHandler(nil) - c.SetPongHandler(nil) - return c -} - -// Subprotocol returns the negotiated protocol for the connection. -func (c *Conn) Subprotocol() string { - return c.subprotocol -} - -// Close closes the underlying network connection without sending or waiting for a close frame. -func (c *Conn) Close() error { - return c.conn.Close() -} - -// LocalAddr returns the local network address. -func (c *Conn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -// RemoteAddr returns the remote network address. 
-func (c *Conn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -// Write methods - -func (c *Conn) writeFatal(err error) error { - err = hideTempErr(err) - c.writeErrMu.Lock() - if c.writeErr == nil { - c.writeErr = err - } - c.writeErrMu.Unlock() - return err -} - -func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error { - <-c.mu - defer func() { c.mu <- true }() - - c.writeErrMu.Lock() - err := c.writeErr - c.writeErrMu.Unlock() - if err != nil { - return err - } - - c.conn.SetWriteDeadline(deadline) - for _, buf := range bufs { - if len(buf) > 0 { - _, err := c.conn.Write(buf) - if err != nil { - return c.writeFatal(err) - } - } - } - - if frameType == CloseMessage { - c.writeFatal(ErrCloseSent) - } - return nil -} - -// WriteControl writes a control message with the given deadline. The allowed -// message types are CloseMessage, PingMessage and PongMessage. -func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { - if !isControl(messageType) { - return errBadWriteOpCode - } - if len(data) > maxControlFramePayloadSize { - return errInvalidControlFrame - } - - b0 := byte(messageType) | finalBit - b1 := byte(len(data)) - if !c.isServer { - b1 |= maskBit - } - - buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) - buf = append(buf, b0, b1) - - if c.isServer { - buf = append(buf, data...) - } else { - key := newMaskKey() - buf = append(buf, key[:]...) - buf = append(buf, data...) - maskBytes(key, 0, buf[6:]) - } - - d := time.Hour * 1000 - if !deadline.IsZero() { - d = deadline.Sub(time.Now()) - if d < 0 { - return errWriteTimeout - } - } - - timer := time.NewTimer(d) - select { - case <-c.mu: - timer.Stop() - case <-timer.C: - return errWriteTimeout - } - defer func() { c.mu <- true }() - - c.writeErrMu.Lock() - err := c.writeErr - c.writeErrMu.Unlock() - if err != nil { - return err - } - - c.conn.SetWriteDeadline(deadline) - _, err = c.conn.Write(buf) - if err != nil { - return c.writeFatal(err) - } - if messageType == CloseMessage { - c.writeFatal(ErrCloseSent) - } - return err -} - -func (c *Conn) prepWrite(messageType int) error { - // Close previous writer if not already closed by the application. It's - // probably better to return an error in this situation, but we cannot - // change this without breaking existing applications. - if c.writer != nil { - c.writer.Close() - c.writer = nil - } - - if !isControl(messageType) && !isData(messageType) { - return errBadWriteOpCode - } - - c.writeErrMu.Lock() - err := c.writeErr - c.writeErrMu.Unlock() - return err -} - -// NextWriter returns a writer for the next message to send. The writer's Close -// method flushes the complete message to the network. -// -// There can be at most one open writer on a connection. NextWriter closes the -// previous writer if the application has not already done so. -func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { - if err := c.prepWrite(messageType); err != nil { - return nil, err - } - - mw := &messageWriter{ - c: c, - frameType: messageType, - pos: maxFrameHeaderSize, - } - c.writer = mw - if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { - w := c.newCompressionWriter(c.writer, c.compressionLevel) - mw.compress = true - c.writer = w - } - return c.writer, nil -} - -type messageWriter struct { - c *Conn - compress bool // whether next call to flushFrame should set RSV1 - pos int // end of data in writeBuf. - frameType int // type of the current frame. 
- err error -} - -func (w *messageWriter) fatal(err error) error { - if w.err != nil { - w.err = err - w.c.writer = nil - } - return err -} - -// flushFrame writes buffered data and extra as a frame to the network. The -// final argument indicates that this is the last frame in the message. -func (w *messageWriter) flushFrame(final bool, extra []byte) error { - c := w.c - length := w.pos - maxFrameHeaderSize + len(extra) - - // Check for invalid control frames. - if isControl(w.frameType) && - (!final || length > maxControlFramePayloadSize) { - return w.fatal(errInvalidControlFrame) - } - - b0 := byte(w.frameType) - if final { - b0 |= finalBit - } - if w.compress { - b0 |= rsv1Bit - } - w.compress = false - - b1 := byte(0) - if !c.isServer { - b1 |= maskBit - } - - // Assume that the frame starts at beginning of c.writeBuf. - framePos := 0 - if c.isServer { - // Adjust up if mask not included in the header. - framePos = 4 - } - - switch { - case length >= 65536: - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | 127 - binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) - case length > 125: - framePos += 6 - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | 126 - binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) - default: - framePos += 8 - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | byte(length) - } - - if !c.isServer { - key := newMaskKey() - copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) - maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) - if len(extra) > 0 { - return c.writeFatal(errors.New("websocket: internal error, extra used in client mode")) - } - } - - // Write the buffers to the connection with best-effort detection of - // concurrent writes. See the concurrency section in the package - // documentation for more info. - - if c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = true - - err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) - - if !c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = false - - if err != nil { - return w.fatal(err) - } - - if final { - c.writer = nil - return nil - } - - // Setup for next frame. - w.pos = maxFrameHeaderSize - w.frameType = continuationFrame - return nil -} - -func (w *messageWriter) ncopy(max int) (int, error) { - n := len(w.c.writeBuf) - w.pos - if n <= 0 { - if err := w.flushFrame(false, nil); err != nil { - return 0, err - } - n = len(w.c.writeBuf) - w.pos - } - if n > max { - n = max - } - return n, nil -} - -func (w *messageWriter) Write(p []byte) (int, error) { - if w.err != nil { - return 0, w.err - } - - if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { - // Don't buffer large messages. 
- err := w.flushFrame(false, p) - if err != nil { - return 0, err - } - return len(p), nil - } - - nn := len(p) - for len(p) > 0 { - n, err := w.ncopy(len(p)) - if err != nil { - return 0, err - } - copy(w.c.writeBuf[w.pos:], p[:n]) - w.pos += n - p = p[n:] - } - return nn, nil -} - -func (w *messageWriter) WriteString(p string) (int, error) { - if w.err != nil { - return 0, w.err - } - - nn := len(p) - for len(p) > 0 { - n, err := w.ncopy(len(p)) - if err != nil { - return 0, err - } - copy(w.c.writeBuf[w.pos:], p[:n]) - w.pos += n - p = p[n:] - } - return nn, nil -} - -func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { - if w.err != nil { - return 0, w.err - } - for { - if w.pos == len(w.c.writeBuf) { - err = w.flushFrame(false, nil) - if err != nil { - break - } - } - var n int - n, err = r.Read(w.c.writeBuf[w.pos:]) - w.pos += n - nn += int64(n) - if err != nil { - if err == io.EOF { - err = nil - } - break - } - } - return nn, err -} - -func (w *messageWriter) Close() error { - if w.err != nil { - return w.err - } - if err := w.flushFrame(true, nil); err != nil { - return err - } - w.err = errWriteClosed - return nil -} - -// WritePreparedMessage writes prepared message into connection. -func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { - frameType, frameData, err := pm.frame(prepareKey{ - isServer: c.isServer, - compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), - compressionLevel: c.compressionLevel, - }) - if err != nil { - return err - } - if c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = true - err = c.write(frameType, c.writeDeadline, frameData, nil) - if !c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = false - return err -} - -// WriteMessage is a helper method for getting a writer using NextWriter, -// writing the message and closing the writer. -func (c *Conn) WriteMessage(messageType int, data []byte) error { - - if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { - // Fast path with no allocations and single frame. - - if err := c.prepWrite(messageType); err != nil { - return err - } - mw := messageWriter{c: c, frameType: messageType, pos: maxFrameHeaderSize} - n := copy(c.writeBuf[mw.pos:], data) - mw.pos += n - data = data[n:] - return mw.flushFrame(true, data) - } - - w, err := c.NextWriter(messageType) - if err != nil { - return err - } - if _, err = w.Write(data); err != nil { - return err - } - return w.Close() -} - -// SetWriteDeadline sets the write deadline on the underlying network -// connection. After a write has timed out, the websocket state is corrupt and -// all future writes will return an error. A zero value for t means writes will -// not time out. -func (c *Conn) SetWriteDeadline(t time.Time) error { - c.writeDeadline = t - return nil -} - -// Read methods - -func (c *Conn) advanceFrame() (int, error) { - - // 1. Skip remainder of previous frame. - - if c.readRemaining > 0 { - if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { - return noFrame, err - } - } - - // 2. Read and parse first two bytes of frame header. 
- - p, err := c.read(2) - if err != nil { - return noFrame, err - } - - final := p[0]&finalBit != 0 - frameType := int(p[0] & 0xf) - mask := p[1]&maskBit != 0 - c.readRemaining = int64(p[1] & 0x7f) - - c.readDecompress = false - if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 { - c.readDecompress = true - p[0] &^= rsv1Bit - } - - if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 { - return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16)) - } - - switch frameType { - case CloseMessage, PingMessage, PongMessage: - if c.readRemaining > maxControlFramePayloadSize { - return noFrame, c.handleProtocolError("control frame length > 125") - } - if !final { - return noFrame, c.handleProtocolError("control frame not final") - } - case TextMessage, BinaryMessage: - if !c.readFinal { - return noFrame, c.handleProtocolError("message start before final message frame") - } - c.readFinal = final - case continuationFrame: - if c.readFinal { - return noFrame, c.handleProtocolError("continuation after final message frame") - } - c.readFinal = final - default: - return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) - } - - // 3. Read and parse frame length. - - switch c.readRemaining { - case 126: - p, err := c.read(2) - if err != nil { - return noFrame, err - } - c.readRemaining = int64(binary.BigEndian.Uint16(p)) - case 127: - p, err := c.read(8) - if err != nil { - return noFrame, err - } - c.readRemaining = int64(binary.BigEndian.Uint64(p)) - } - - // 4. Handle frame masking. - - if mask != c.isServer { - return noFrame, c.handleProtocolError("incorrect mask flag") - } - - if mask { - c.readMaskPos = 0 - p, err := c.read(len(c.readMaskKey)) - if err != nil { - return noFrame, err - } - copy(c.readMaskKey[:], p) - } - - // 5. For text and binary messages, enforce read limit and return. - - if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { - - c.readLength += c.readRemaining - if c.readLimit > 0 && c.readLength > c.readLimit { - c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) - return noFrame, ErrReadLimit - } - - return frameType, nil - } - - // 6. Read control frame payload. - - var payload []byte - if c.readRemaining > 0 { - payload, err = c.read(int(c.readRemaining)) - c.readRemaining = 0 - if err != nil { - return noFrame, err - } - if c.isServer { - maskBytes(c.readMaskKey, 0, payload) - } - } - - // 7. Process control frame payload. 
- - switch frameType { - case PongMessage: - if err := c.handlePong(string(payload)); err != nil { - return noFrame, err - } - case PingMessage: - if err := c.handlePing(string(payload)); err != nil { - return noFrame, err - } - case CloseMessage: - closeCode := CloseNoStatusReceived - closeText := "" - if len(payload) >= 2 { - closeCode = int(binary.BigEndian.Uint16(payload)) - if !isValidReceivedCloseCode(closeCode) { - return noFrame, c.handleProtocolError("invalid close code") - } - closeText = string(payload[2:]) - if !utf8.ValidString(closeText) { - return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") - } - } - if err := c.handleClose(closeCode, closeText); err != nil { - return noFrame, err - } - return noFrame, &CloseError{Code: closeCode, Text: closeText} - } - - return frameType, nil -} - -func (c *Conn) handleProtocolError(message string) error { - c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait)) - return errors.New("websocket: " + message) -} - -// NextReader returns the next data message received from the peer. The -// returned messageType is either TextMessage or BinaryMessage. -// -// There can be at most one open reader on a connection. NextReader discards -// the previous message if the application has not already consumed it. -// -// Applications must break out of the application's read loop when this method -// returns a non-nil error value. Errors returned from this method are -// permanent. Once this method returns a non-nil error, all subsequent calls to -// this method return the same error. -func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { - // Close previous reader, only relevant for decompression. - if c.reader != nil { - c.reader.Close() - c.reader = nil - } - - c.messageReader = nil - c.readLength = 0 - - for c.readErr == nil { - frameType, err := c.advanceFrame() - if err != nil { - c.readErr = hideTempErr(err) - break - } - if frameType == TextMessage || frameType == BinaryMessage { - c.messageReader = &messageReader{c} - c.reader = c.messageReader - if c.readDecompress { - c.reader = c.newDecompressionReader(c.reader) - } - return frameType, c.reader, nil - } - } - - // Applications that do handle the error returned from this method spin in - // tight loop on connection failure. To help application developers detect - // this error, panic on repeated reads to the failed connection. 
- c.readErrCount++ - if c.readErrCount >= 1000 { - panic("repeated read on failed websocket connection") - } - - return noFrame, nil, c.readErr -} - -type messageReader struct{ c *Conn } - -func (r *messageReader) Read(b []byte) (int, error) { - c := r.c - if c.messageReader != r { - return 0, io.EOF - } - - for c.readErr == nil { - - if c.readRemaining > 0 { - if int64(len(b)) > c.readRemaining { - b = b[:c.readRemaining] - } - n, err := c.br.Read(b) - c.readErr = hideTempErr(err) - if c.isServer { - c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) - } - c.readRemaining -= int64(n) - if c.readRemaining > 0 && c.readErr == io.EOF { - c.readErr = errUnexpectedEOF - } - return n, c.readErr - } - - if c.readFinal { - c.messageReader = nil - return 0, io.EOF - } - - frameType, err := c.advanceFrame() - switch { - case err != nil: - c.readErr = hideTempErr(err) - case frameType == TextMessage || frameType == BinaryMessage: - c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") - } - } - - err := c.readErr - if err == io.EOF && c.messageReader == r { - err = errUnexpectedEOF - } - return 0, err -} - -func (r *messageReader) Close() error { - return nil -} - -// ReadMessage is a helper method for getting a reader using NextReader and -// reading from that reader to a buffer. -func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { - var r io.Reader - messageType, r, err = c.NextReader() - if err != nil { - return messageType, nil, err - } - p, err = ioutil.ReadAll(r) - return messageType, p, err -} - -// SetReadDeadline sets the read deadline on the underlying network connection. -// After a read has timed out, the websocket connection state is corrupt and -// all future reads will return an error. A zero value for t means reads will -// not time out. -func (c *Conn) SetReadDeadline(t time.Time) error { - return c.conn.SetReadDeadline(t) -} - -// SetReadLimit sets the maximum size for a message read from the peer. If a -// message exceeds the limit, the connection sends a close frame to the peer -// and returns ErrReadLimit to the application. -func (c *Conn) SetReadLimit(limit int64) { - c.readLimit = limit -} - -// CloseHandler returns the current close handler -func (c *Conn) CloseHandler() func(code int, text string) error { - return c.handleClose -} - -// SetCloseHandler sets the handler for close messages received from the peer. -// The code argument to h is the received close code or CloseNoStatusReceived -// if the close message is empty. The default close handler sends a close frame -// back to the peer. -// -// The application must read the connection to process close messages as -// described in the section on Control Frames above. -// -// The connection read methods return a CloseError when a close frame is -// received. Most applications should handle close messages as part of their -// normal error handling. Applications should only set a close handler when the -// application must perform some action before sending a close frame back to -// the peer. 
-func (c *Conn) SetCloseHandler(h func(code int, text string) error) { - if h == nil { - h = func(code int, text string) error { - message := []byte{} - if code != CloseNoStatusReceived { - message = FormatCloseMessage(code, "") - } - c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) - return nil - } - } - c.handleClose = h -} - -// PingHandler returns the current ping handler -func (c *Conn) PingHandler() func(appData string) error { - return c.handlePing -} - -// SetPingHandler sets the handler for ping messages received from the peer. -// The appData argument to h is the PING frame application data. The default -// ping handler sends a pong to the peer. -// -// The application must read the connection to process ping messages as -// described in the section on Control Frames above. -func (c *Conn) SetPingHandler(h func(appData string) error) { - if h == nil { - h = func(message string) error { - err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) - if err == ErrCloseSent { - return nil - } else if e, ok := err.(net.Error); ok && e.Temporary() { - return nil - } - return err - } - } - c.handlePing = h -} - -// PongHandler returns the current pong handler -func (c *Conn) PongHandler() func(appData string) error { - return c.handlePong -} - -// SetPongHandler sets the handler for pong messages received from the peer. -// The appData argument to h is the PONG frame application data. The default -// pong handler does nothing. -// -// The application must read the connection to process ping messages as -// described in the section on Control Frames above. -func (c *Conn) SetPongHandler(h func(appData string) error) { - if h == nil { - h = func(string) error { return nil } - } - c.handlePong = h -} - -// UnderlyingConn returns the internal net.Conn. This can be used to further -// modifications to connection specific flags. -func (c *Conn) UnderlyingConn() net.Conn { - return c.conn -} - -// EnableWriteCompression enables and disables write compression of -// subsequent text and binary messages. This function is a noop if -// compression was not negotiated with the peer. -func (c *Conn) EnableWriteCompression(enable bool) { - c.enableWriteCompression = enable -} - -// SetCompressionLevel sets the flate compression level for subsequent text and -// binary messages. This function is a noop if compression was not negotiated -// with the peer. See the compress/flate package for a description of -// compression levels. -func (c *Conn) SetCompressionLevel(level int) error { - if !isValidCompressionLevel(level) { - return errors.New("websocket: invalid compression level") - } - c.compressionLevel = level - return nil -} - -// FormatCloseMessage formats closeCode and text as a WebSocket close message. -func FormatCloseMessage(closeCode int, text string) []byte { - buf := make([]byte, 2+len(text)) - binary.BigEndian.PutUint16(buf, uint16(closeCode)) - copy(buf[2:], text) - return buf -} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/conn_read.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/conn_read.go deleted file mode 100644 index 1ea15059..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/conn_read.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
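Before the next file, a short illustration of how the ping/pong handler setters documented above are typically wired together. pongWait is an assumed tuning constant, and the pattern matches the chat and command examples later in this diff; it is a sketch, not library-prescribed behaviour:

package example

import (
	"time"

	"github.com/gorilla/websocket"
)

// pongWait is an assumed tuning value for this sketch.
const pongWait = 60 * time.Second

// installHeartbeat shows one common way to wire up the handler setters
// documented above: every pong from the peer extends the read deadline, so a
// silent peer eventually surfaces as a read timeout in the read loop.
func installHeartbeat(conn *websocket.Conn) {
	conn.SetReadDeadline(time.Now().Add(pongWait))
	conn.SetPongHandler(func(string) error {
		return conn.SetReadDeadline(time.Now().Add(pongWait))
	})
}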
- -// +build go1.5 - -package websocket - -import "io" - -func (c *Conn) read(n int) ([]byte, error) { - p, err := c.br.Peek(n) - if err == io.EOF { - err = errUnexpectedEOF - } - c.br.Discard(len(p)) - return p, err -} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/conn_read_legacy.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/conn_read_legacy.go deleted file mode 100644 index 018541cf..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/conn_read_legacy.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.5 - -package websocket - -import "io" - -func (c *Conn) read(n int) ([]byte, error) { - p, err := c.br.Peek(n) - if err == io.EOF { - err = errUnexpectedEOF - } - if len(p) > 0 { - // advance over the bytes just read - io.ReadFull(c.br, p) - } - return p, err -} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/doc.go deleted file mode 100644 index e291a952..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/doc.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package websocket implements the WebSocket protocol defined in RFC 6455. -// -// Overview -// -// The Conn type represents a WebSocket connection. A server application uses -// the Upgrade function from an Upgrader object with a HTTP request handler -// to get a pointer to a Conn: -// -// var upgrader = websocket.Upgrader{ -// ReadBufferSize: 1024, -// WriteBufferSize: 1024, -// } -// -// func handler(w http.ResponseWriter, r *http.Request) { -// conn, err := upgrader.Upgrade(w, r, nil) -// if err != nil { -// log.Println(err) -// return -// } -// ... Use conn to send and receive messages. -// } -// -// Call the connection's WriteMessage and ReadMessage methods to send and -// receive messages as a slice of bytes. This snippet of code shows how to echo -// messages using these methods: -// -// for { -// messageType, p, err := conn.ReadMessage() -// if err != nil { -// return -// } -// if err = conn.WriteMessage(messageType, p); err != nil { -// return err -// } -// } -// -// In above snippet of code, p is a []byte and messageType is an int with value -// websocket.BinaryMessage or websocket.TextMessage. -// -// An application can also send and receive messages using the io.WriteCloser -// and io.Reader interfaces. To send a message, call the connection NextWriter -// method to get an io.WriteCloser, write the message to the writer and close -// the writer when done. To receive a message, call the connection NextReader -// method to get an io.Reader and read until io.EOF is returned. 
This snippet -// shows how to echo messages using the NextWriter and NextReader methods: -// -// for { -// messageType, r, err := conn.NextReader() -// if err != nil { -// return -// } -// w, err := conn.NextWriter(messageType) -// if err != nil { -// return err -// } -// if _, err := io.Copy(w, r); err != nil { -// return err -// } -// if err := w.Close(); err != nil { -// return err -// } -// } -// -// Data Messages -// -// The WebSocket protocol distinguishes between text and binary data messages. -// Text messages are interpreted as UTF-8 encoded text. The interpretation of -// binary messages is left to the application. -// -// This package uses the TextMessage and BinaryMessage integer constants to -// identify the two data message types. The ReadMessage and NextReader methods -// return the type of the received message. The messageType argument to the -// WriteMessage and NextWriter methods specifies the type of a sent message. -// -// It is the application's responsibility to ensure that text messages are -// valid UTF-8 encoded text. -// -// Control Messages -// -// The WebSocket protocol defines three types of control messages: close, ping -// and pong. Call the connection WriteControl, WriteMessage or NextWriter -// methods to send a control message to the peer. -// -// Connections handle received close messages by sending a close message to the -// peer and returning a *CloseError from the the NextReader, ReadMessage or the -// message Read method. -// -// Connections handle received ping and pong messages by invoking callback -// functions set with SetPingHandler and SetPongHandler methods. The callback -// functions are called from the NextReader, ReadMessage and the message Read -// methods. -// -// The default ping handler sends a pong to the peer. The application's reading -// goroutine can block for a short time while the handler writes the pong data -// to the connection. -// -// The application must read the connection to process ping, pong and close -// messages sent from the peer. If the application is not otherwise interested -// in messages from the peer, then the application should start a goroutine to -// read and discard messages from the peer. A simple example is: -// -// func readLoop(c *websocket.Conn) { -// for { -// if _, _, err := c.NextReader(); err != nil { -// c.Close() -// break -// } -// } -// } -// -// Concurrency -// -// Connections support one concurrent reader and one concurrent writer. -// -// Applications are responsible for ensuring that no more than one goroutine -// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, -// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and -// that no more than one goroutine calls the read methods (NextReader, -// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) -// concurrently. -// -// The Close and WriteControl methods can be called concurrently with all other -// methods. -// -// Origin Considerations -// -// Web browsers allow Javascript applications to open a WebSocket connection to -// any host. It's up to the server to enforce an origin policy using the Origin -// request header sent by the browser. -// -// The Upgrader calls the function specified in the CheckOrigin field to check -// the origin. If the CheckOrigin function returns false, then the Upgrade -// method fails the WebSocket handshake with HTTP status 403. 
-// -// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail -// the handshake if the Origin request header is present and not equal to the -// Host request header. -// -// An application can allow connections from any origin by specifying a -// function that always returns true: -// -// var upgrader = websocket.Upgrader{ -// CheckOrigin: func(r *http.Request) bool { return true }, -// } -// -// The deprecated Upgrade function does not enforce an origin policy. It's the -// application's responsibility to check the Origin header before calling -// Upgrade. -// -// Compression EXPERIMENTAL -// -// Per message compression extensions (RFC 7692) are experimentally supported -// by this package in a limited capacity. Setting the EnableCompression option -// to true in Dialer or Upgrader will attempt to negotiate per message deflate -// support. -// -// var upgrader = websocket.Upgrader{ -// EnableCompression: true, -// } -// -// If compression was successfully negotiated with the connection's peer, any -// message received in compressed form will be automatically decompressed. -// All Read methods will return uncompressed bytes. -// -// Per message compression of messages written to a connection can be enabled -// or disabled by calling the corresponding Conn method: -// -// conn.EnableWriteCompression(false) -// -// Currently this package does not support compression with "context takeover". -// This means that messages must be compressed and decompressed in isolation, -// without retaining sliding window or dictionary state across messages. For -// more details refer to RFC 7692. -// -// Use of compression is experimental and may result in decreased performance. -package websocket diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/autobahn/server.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/autobahn/server.go deleted file mode 100644 index 3db880f9..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/autobahn/server.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Command server is a test server for the Autobahn WebSockets Test Suite. -package main - -import ( - "errors" - "flag" - "io" - "log" - "net/http" - "time" - "unicode/utf8" - - "github.com/gorilla/websocket" -) - -var upgrader = websocket.Upgrader{ - ReadBufferSize: 4096, - WriteBufferSize: 4096, - EnableCompression: true, - CheckOrigin: func(r *http.Request) bool { - return true - }, -} - -// echoCopy echoes messages from the client using io.Copy. 
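A brief aside before the echo handlers below: the Concurrency section of doc.go above allows only one concurrent writer per connection. A common way to honour that rule is to funnel all outbound traffic through a single goroutine, as in this sketch; the channel-based design is an assumption of the example, not library API:

package example

import "github.com/gorilla/websocket"

// writeLoop owns the write side of conn: because only this goroutine ever
// calls WriteMessage, the one-writer rule from doc.go is respected even when
// many producers send on the outbound channel.
func writeLoop(conn *websocket.Conn, outbound <-chan []byte) {
	defer conn.Close()
	for msg := range outbound {
		if err := conn.WriteMessage(websocket.TextMessage, msg); err != nil {
			return
		}
	}
}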
-func echoCopy(w http.ResponseWriter, r *http.Request, writerOnly bool) { - conn, err := upgrader.Upgrade(w, r, nil) - if err != nil { - log.Println("Upgrade:", err) - return - } - defer conn.Close() - for { - mt, r, err := conn.NextReader() - if err != nil { - if err != io.EOF { - log.Println("NextReader:", err) - } - return - } - if mt == websocket.TextMessage { - r = &validator{r: r} - } - w, err := conn.NextWriter(mt) - if err != nil { - log.Println("NextWriter:", err) - return - } - if mt == websocket.TextMessage { - r = &validator{r: r} - } - if writerOnly { - _, err = io.Copy(struct{ io.Writer }{w}, r) - } else { - _, err = io.Copy(w, r) - } - if err != nil { - if err == errInvalidUTF8 { - conn.WriteControl(websocket.CloseMessage, - websocket.FormatCloseMessage(websocket.CloseInvalidFramePayloadData, ""), - time.Time{}) - } - log.Println("Copy:", err) - return - } - err = w.Close() - if err != nil { - log.Println("Close:", err) - return - } - } -} - -func echoCopyWriterOnly(w http.ResponseWriter, r *http.Request) { - echoCopy(w, r, true) -} - -func echoCopyFull(w http.ResponseWriter, r *http.Request) { - echoCopy(w, r, false) -} - -// echoReadAll echoes messages from the client by reading the entire message -// with ioutil.ReadAll. -func echoReadAll(w http.ResponseWriter, r *http.Request, writeMessage, writePrepared bool) { - conn, err := upgrader.Upgrade(w, r, nil) - if err != nil { - log.Println("Upgrade:", err) - return - } - defer conn.Close() - for { - mt, b, err := conn.ReadMessage() - if err != nil { - if err != io.EOF { - log.Println("NextReader:", err) - } - return - } - if mt == websocket.TextMessage { - if !utf8.Valid(b) { - conn.WriteControl(websocket.CloseMessage, - websocket.FormatCloseMessage(websocket.CloseInvalidFramePayloadData, ""), - time.Time{}) - log.Println("ReadAll: invalid utf8") - } - } - if writeMessage { - if !writePrepared { - err = conn.WriteMessage(mt, b) - if err != nil { - log.Println("WriteMessage:", err) - } - } else { - pm, err := websocket.NewPreparedMessage(mt, b) - if err != nil { - log.Println("NewPreparedMessage:", err) - return - } - err = conn.WritePreparedMessage(pm) - if err != nil { - log.Println("WritePreparedMessage:", err) - } - } - } else { - w, err := conn.NextWriter(mt) - if err != nil { - log.Println("NextWriter:", err) - return - } - if _, err := w.Write(b); err != nil { - log.Println("Writer:", err) - return - } - if err := w.Close(); err != nil { - log.Println("Close:", err) - return - } - } - } -} - -func echoReadAllWriter(w http.ResponseWriter, r *http.Request) { - echoReadAll(w, r, false, false) -} - -func echoReadAllWriteMessage(w http.ResponseWriter, r *http.Request) { - echoReadAll(w, r, true, false) -} - -func echoReadAllWritePreparedMessage(w http.ResponseWriter, r *http.Request) { - echoReadAll(w, r, true, true) -} - -func serveHome(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { - http.Error(w, "Not found.", 404) - return - } - if r.Method != "GET" { - http.Error(w, "Method not allowed", 405) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - io.WriteString(w, "<html><body>Echo Server</body></html>") -} - -var addr = flag.String("addr", ":9000", "http service address") - -func main() { - flag.Parse() - http.HandleFunc("/", serveHome) - http.HandleFunc("/c", echoCopyWriterOnly) - http.HandleFunc("/f", echoCopyFull) - http.HandleFunc("/r", echoReadAllWriter) - http.HandleFunc("/m", echoReadAllWriteMessage) - http.HandleFunc("/p", echoReadAllWritePreparedMessage) - err := 
http.ListenAndServe(*addr, nil) - if err != nil { - log.Fatal("ListenAndServe: ", err) - } -} - -type validator struct { - state int - x rune - r io.Reader -} - -var errInvalidUTF8 = errors.New("invalid utf8") - -func (r *validator) Read(p []byte) (int, error) { - n, err := r.r.Read(p) - state := r.state - x := r.x - for _, b := range p[:n] { - state, x = decode(state, x, b) - if state == utf8Reject { - break - } - } - r.state = state - r.x = x - if state == utf8Reject || (err == io.EOF && state != utf8Accept) { - return n, errInvalidUTF8 - } - return n, err -} - -// UTF-8 decoder from http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ -// -// Copyright (c) 2008-2009 Bjoern Hoehrmann <bjoern@hoehrmann.de> -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to -// deal in the Software without restriction, including without limitation the -// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -// sell copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -// IN THE SOFTWARE. 
-var utf8d = [...]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 00..1f - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20..3f - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 40..5f - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 60..7f - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, // 80..9f - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // a0..bf - 8, 8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // c0..df - 0xa, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3, 0x3, // e0..ef - 0xb, 0x6, 0x6, 0x6, 0x5, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, // f0..ff - 0x0, 0x1, 0x2, 0x3, 0x5, 0x8, 0x7, 0x1, 0x1, 0x1, 0x4, 0x6, 0x1, 0x1, 0x1, 0x1, // s0..s0 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, // s1..s2 - 1, 2, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, // s3..s4 - 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, // s5..s6 - 1, 3, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // s7..s8 -} - -const ( - utf8Accept = 0 - utf8Reject = 1 -) - -func decode(state int, x rune, b byte) (int, rune) { - t := utf8d[b] - if state != utf8Accept { - x = rune(b&0x3f) | (x << 6) - } else { - x = rune((0xff >> t) & b) - } - state = int(utf8d[256+state*16+int(t)]) - return state, x -} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/chat/client.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/chat/client.go deleted file mode 100644 index ecfd9a7a..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/chat/client.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "bytes" - "log" - "net/http" - "time" - - "github.com/gorilla/websocket" -) - -const ( - // Time allowed to write a message to the peer. - writeWait = 10 * time.Second - - // Time allowed to read the next pong message from the peer. - pongWait = 60 * time.Second - - // Send pings to peer with this period. Must be less than pongWait. - pingPeriod = (pongWait * 9) / 10 - - // Maximum message size allowed from peer. - maxMessageSize = 512 -) - -var ( - newline = []byte{'\n'} - space = []byte{' '} -) - -var upgrader = websocket.Upgrader{ - ReadBufferSize: 1024, - WriteBufferSize: 1024, -} - -// Client is a middleman between the websocket connection and the hub. -type Client struct { - hub *Hub - - // The websocket connection. - conn *websocket.Conn - - // Buffered channel of outbound messages. - send chan []byte -} - -// readPump pumps messages from the websocket connection to the hub. -// -// The application runs readPump in a per-connection goroutine. The application -// ensures that there is at most one reader on a connection by executing all -// reads from this goroutine. 
-func (c *Client) readPump() { - defer func() { - c.hub.unregister <- c - c.conn.Close() - }() - c.conn.SetReadLimit(maxMessageSize) - c.conn.SetReadDeadline(time.Now().Add(pongWait)) - c.conn.SetPongHandler(func(string) error { c.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil }) - for { - _, message, err := c.conn.ReadMessage() - if err != nil { - if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway) { - log.Printf("error: %v", err) - } - break - } - message = bytes.TrimSpace(bytes.Replace(message, newline, space, -1)) - c.hub.broadcast <- message - } -} - -// writePump pumps messages from the hub to the websocket connection. -// -// A goroutine running writePump is started for each connection. The -// application ensures that there is at most one writer to a connection by -// executing all writes from this goroutine. -func (c *Client) writePump() { - ticker := time.NewTicker(pingPeriod) - defer func() { - ticker.Stop() - c.conn.Close() - }() - for { - select { - case message, ok := <-c.send: - c.conn.SetWriteDeadline(time.Now().Add(writeWait)) - if !ok { - // The hub closed the channel. - c.conn.WriteMessage(websocket.CloseMessage, []byte{}) - return - } - - w, err := c.conn.NextWriter(websocket.TextMessage) - if err != nil { - return - } - w.Write(message) - - // Add queued chat messages to the current websocket message. - n := len(c.send) - for i := 0; i < n; i++ { - w.Write(newline) - w.Write(<-c.send) - } - - if err := w.Close(); err != nil { - return - } - case <-ticker.C: - c.conn.SetWriteDeadline(time.Now().Add(writeWait)) - if err := c.conn.WriteMessage(websocket.PingMessage, []byte{}); err != nil { - return - } - } - } -} - -// serveWs handles websocket requests from the peer. -func serveWs(hub *Hub, w http.ResponseWriter, r *http.Request) { - conn, err := upgrader.Upgrade(w, r, nil) - if err != nil { - log.Println(err) - return - } - client := &Client{hub: hub, conn: conn, send: make(chan []byte, 256)} - client.hub.register <- client - - // Allow collection of memory referenced by the caller by doing all work in - // new goroutines. - go client.writePump() - go client.readPump() -} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/chat/hub.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/chat/hub.go deleted file mode 100644 index 7f07ea07..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/chat/hub.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -// hub maintains the set of active clients and broadcasts messages to the -// clients. -type Hub struct { - // Registered clients. - clients map[*Client]bool - - // Inbound messages from the clients. - broadcast chan []byte - - // Register requests from the clients. - register chan *Client - - // Unregister requests from clients. 
- unregister chan *Client -} - -func newHub() *Hub { - return &Hub{ - broadcast: make(chan []byte), - register: make(chan *Client), - unregister: make(chan *Client), - clients: make(map[*Client]bool), - } -} - -func (h *Hub) run() { - for { - select { - case client := <-h.register: - h.clients[client] = true - case client := <-h.unregister: - if _, ok := h.clients[client]; ok { - delete(h.clients, client) - close(client.send) - } - case message := <-h.broadcast: - for client := range h.clients { - select { - case client.send <- message: - default: - close(client.send) - delete(h.clients, client) - } - } - } - } -} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/chat/main.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/chat/main.go deleted file mode 100644 index 74615d59..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/chat/main.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "flag" - "log" - "net/http" -) - -var addr = flag.String("addr", ":8080", "http service address") - -func serveHome(w http.ResponseWriter, r *http.Request) { - log.Println(r.URL) - if r.URL.Path != "/" { - http.Error(w, "Not found", 404) - return - } - if r.Method != "GET" { - http.Error(w, "Method not allowed", 405) - return - } - http.ServeFile(w, r, "home.html") -} - -func main() { - flag.Parse() - hub := newHub() - go hub.run() - http.HandleFunc("/", serveHome) - http.HandleFunc("/ws", func(w http.ResponseWriter, r *http.Request) { - serveWs(hub, w, r) - }) - err := http.ListenAndServe(*addr, nil) - if err != nil { - log.Fatal("ListenAndServe: ", err) - } -} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/command/main.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/command/main.go deleted file mode 100644 index 239c5c85..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/command/main.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2015 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "bufio" - "flag" - "io" - "log" - "net/http" - "os" - "os/exec" - "time" - - "github.com/gorilla/websocket" -) - -var ( - addr = flag.String("addr", "127.0.0.1:8080", "http service address") - cmdPath string -) - -const ( - // Time allowed to write a message to the peer. - writeWait = 10 * time.Second - - // Maximum message size allowed from peer. - maxMessageSize = 8192 - - // Time allowed to read the next pong message from the peer. - pongWait = 60 * time.Second - - // Send pings to peer with this period. Must be less than pongWait. - pingPeriod = (pongWait * 9) / 10 - - // Time to wait before force close on connection. 
- closeGracePeriod = 10 * time.Second -) - -func pumpStdin(ws *websocket.Conn, w io.Writer) { - defer ws.Close() - ws.SetReadLimit(maxMessageSize) - ws.SetReadDeadline(time.Now().Add(pongWait)) - ws.SetPongHandler(func(string) error { ws.SetReadDeadline(time.Now().Add(pongWait)); return nil }) - for { - _, message, err := ws.ReadMessage() - if err != nil { - break - } - message = append(message, '\n') - if _, err := w.Write(message); err != nil { - break - } - } -} - -func pumpStdout(ws *websocket.Conn, r io.Reader, done chan struct{}) { - defer func() { - }() - s := bufio.NewScanner(r) - for s.Scan() { - ws.SetWriteDeadline(time.Now().Add(writeWait)) - if err := ws.WriteMessage(websocket.TextMessage, s.Bytes()); err != nil { - ws.Close() - break - } - } - if s.Err() != nil { - log.Println("scan:", s.Err()) - } - close(done) - - ws.SetWriteDeadline(time.Now().Add(writeWait)) - ws.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) - time.Sleep(closeGracePeriod) - ws.Close() -} - -func ping(ws *websocket.Conn, done chan struct{}) { - ticker := time.NewTicker(pingPeriod) - defer ticker.Stop() - for { - select { - case <-ticker.C: - if err := ws.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(writeWait)); err != nil { - log.Println("ping:", err) - } - case <-done: - return - } - } -} - -func internalError(ws *websocket.Conn, msg string, err error) { - log.Println(msg, err) - ws.WriteMessage(websocket.TextMessage, []byte("Internal server error.")) -} - -var upgrader = websocket.Upgrader{} - -func serveWs(w http.ResponseWriter, r *http.Request) { - ws, err := upgrader.Upgrade(w, r, nil) - if err != nil { - log.Println("upgrade:", err) - return - } - - defer ws.Close() - - outr, outw, err := os.Pipe() - if err != nil { - internalError(ws, "stdout:", err) - return - } - defer outr.Close() - defer outw.Close() - - inr, inw, err := os.Pipe() - if err != nil { - internalError(ws, "stdin:", err) - return - } - defer inr.Close() - defer inw.Close() - - proc, err := os.StartProcess(cmdPath, flag.Args(), &os.ProcAttr{ - Files: []*os.File{inr, outw, outw}, - }) - if err != nil { - internalError(ws, "start:", err) - return - } - - inr.Close() - outw.Close() - - stdoutDone := make(chan struct{}) - go pumpStdout(ws, outr, stdoutDone) - go ping(ws, stdoutDone) - - pumpStdin(ws, inw) - - // Some commands will exit when stdin is closed. - inw.Close() - - // Other commands need a bonk on the head. - if err := proc.Signal(os.Interrupt); err != nil { - log.Println("inter:", err) - } - - select { - case <-stdoutDone: - case <-time.After(time.Second): - // A bigger bonk on the head. 
- if err := proc.Signal(os.Kill); err != nil { - log.Println("term:", err) - } - <-stdoutDone - } - - if _, err := proc.Wait(); err != nil { - log.Println("wait:", err) - } -} - -func serveHome(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { - http.Error(w, "Not found", 404) - return - } - if r.Method != "GET" { - http.Error(w, "Method not allowed", 405) - return - } - http.ServeFile(w, r, "home.html") -} - -func main() { - flag.Parse() - if len(flag.Args()) < 1 { - log.Fatal("must specify at least one argument") - } - var err error - cmdPath, err = exec.LookPath(flag.Args()[0]) - if err != nil { - log.Fatal(err) - } - http.HandleFunc("/", serveHome) - http.HandleFunc("/ws", serveWs) - log.Fatal(http.ListenAndServe(*addr, nil)) -} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/echo/client.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/echo/client.go deleted file mode 100644 index 6578094e..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/echo/client.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2015 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "flag" - "log" - "net/url" - "os" - "os/signal" - "time" - - "github.com/gorilla/websocket" -) - -var addr = flag.String("addr", "localhost:8080", "http service address") - -func main() { - flag.Parse() - log.SetFlags(0) - - interrupt := make(chan os.Signal, 1) - signal.Notify(interrupt, os.Interrupt) - - u := url.URL{Scheme: "ws", Host: *addr, Path: "/echo"} - log.Printf("connecting to %s", u.String()) - - c, _, err := websocket.DefaultDialer.Dial(u.String(), nil) - if err != nil { - log.Fatal("dial:", err) - } - defer c.Close() - - done := make(chan struct{}) - - go func() { - defer c.Close() - defer close(done) - for { - _, message, err := c.ReadMessage() - if err != nil { - log.Println("read:", err) - return - } - log.Printf("recv: %s", message) - } - }() - - ticker := time.NewTicker(time.Second) - defer ticker.Stop() - - for { - select { - case t := <-ticker.C: - err := c.WriteMessage(websocket.TextMessage, []byte(t.String())) - if err != nil { - log.Println("write:", err) - return - } - case <-interrupt: - log.Println("interrupt") - // To cleanly close a connection, a client should send a close - // frame and wait for the server to close the connection. - err := c.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) - if err != nil { - log.Println("write close:", err) - return - } - select { - case <-done: - case <-time.After(time.Second): - } - c.Close() - return - } - } -} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/echo/server.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/echo/server.go deleted file mode 100644 index a685b097..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/echo/server.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2015 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
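Before the echo server file below, a small sketch tying the read-goroutine pattern of the client above to IsUnexpectedCloseError, which the chat example in this diff also uses; the set of "expected" close codes and the logging policy are assumptions of this sketch:

package example

import (
	"log"

	"github.com/gorilla/websocket"
)

// readLoop mirrors the read goroutine of the echo client above, but filters
// errors through IsUnexpectedCloseError so that a clean shutdown by the peer
// is not logged as a failure.
func readLoop(conn *websocket.Conn) {
	defer conn.Close()
	for {
		_, msg, err := conn.ReadMessage()
		if err != nil {
			if websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {
				log.Printf("read: %v", err)
			}
			return
		}
		log.Printf("recv: %s", msg)
	}
}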
- -// +build ignore - -package main - -import ( - "flag" - "html/template" - "log" - "net/http" - - "github.com/gorilla/websocket" -) - -var addr = flag.String("addr", "localhost:8080", "http service address") - -var upgrader = websocket.Upgrader{} // use default options - -func echo(w http.ResponseWriter, r *http.Request) { - c, err := upgrader.Upgrade(w, r, nil) - if err != nil { - log.Print("upgrade:", err) - return - } - defer c.Close() - for { - mt, message, err := c.ReadMessage() - if err != nil { - log.Println("read:", err) - break - } - log.Printf("recv: %s", message) - err = c.WriteMessage(mt, message) - if err != nil { - log.Println("write:", err) - break - } - } -} - -func home(w http.ResponseWriter, r *http.Request) { - homeTemplate.Execute(w, "ws://"+r.Host+"/echo") -} - -func main() { - flag.Parse() - log.SetFlags(0) - http.HandleFunc("/echo", echo) - http.HandleFunc("/", home) - log.Fatal(http.ListenAndServe(*addr, nil)) -} - -var homeTemplate = template.Must(template.New("").Parse(` -<!DOCTYPE html> -<head> -<meta charset="utf-8"> -<script> -window.addEventListener("load", function(evt) { - - var output = document.getElementById("output"); - var input = document.getElementById("input"); - var ws; - - var print = function(message) { - var d = document.createElement("div"); - d.innerHTML = message; - output.appendChild(d); - }; - - document.getElementById("open").onclick = function(evt) { - if (ws) { - return false; - } - ws = new WebSocket("{{.}}"); - ws.onopen = function(evt) { - print("OPEN"); - } - ws.onclose = function(evt) { - print("CLOSE"); - ws = null; - } - ws.onmessage = function(evt) { - print("RESPONSE: " + evt.data); - } - ws.onerror = function(evt) { - print("ERROR: " + evt.data); - } - return false; - }; - - document.getElementById("send").onclick = function(evt) { - if (!ws) { - return false; - } - print("SEND: " + input.value); - ws.send(input.value); - return false; - }; - - document.getElementById("close").onclick = function(evt) { - if (!ws) { - return false; - } - ws.close(); - return false; - }; - -}); -</script> -</head> -<body> -<table> -<tr><td valign="top" width="50%"> -<p>Click "Open" to create a connection to the server, -"Send" to send a message to the server and "Close" to close the connection. -You can change the message and send multiple times. -<p> -<form> -<button id="open">Open</button> -<button id="close">Close</button> -<p><input id="input" type="text" value="Hello world!"> -<button id="send">Send</button> -</form> -</td><td valign="top" width="50%"> -<div id="output"></div> -</td></tr></table> -</body> -</html> -`)) diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/filewatch/main.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/filewatch/main.go deleted file mode 100644 index f5f9da5c..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/examples/filewatch/main.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "flag" - "html/template" - "io/ioutil" - "log" - "net/http" - "os" - "strconv" - "time" - - "github.com/gorilla/websocket" -) - -const ( - // Time allowed to write the file to the client. - writeWait = 10 * time.Second - - // Time allowed to read the next pong message from the client. 
- pongWait = 60 * time.Second - - // Send pings to client with this period. Must be less than pongWait. - pingPeriod = (pongWait * 9) / 10 - - // Poll file for changes with this period. - filePeriod = 10 * time.Second -) - -var ( - addr = flag.String("addr", ":8080", "http service address") - homeTempl = template.Must(template.New("").Parse(homeHTML)) - filename string - upgrader = websocket.Upgrader{ - ReadBufferSize: 1024, - WriteBufferSize: 1024, - } -) - -func readFileIfModified(lastMod time.Time) ([]byte, time.Time, error) { - fi, err := os.Stat(filename) - if err != nil { - return nil, lastMod, err - } - if !fi.ModTime().After(lastMod) { - return nil, lastMod, nil - } - p, err := ioutil.ReadFile(filename) - if err != nil { - return nil, fi.ModTime(), err - } - return p, fi.ModTime(), nil -} - -func reader(ws *websocket.Conn) { - defer ws.Close() - ws.SetReadLimit(512) - ws.SetReadDeadline(time.Now().Add(pongWait)) - ws.SetPongHandler(func(string) error { ws.SetReadDeadline(time.Now().Add(pongWait)); return nil }) - for { - _, _, err := ws.ReadMessage() - if err != nil { - break - } - } -} - -func writer(ws *websocket.Conn, lastMod time.Time) { - lastError := "" - pingTicker := time.NewTicker(pingPeriod) - fileTicker := time.NewTicker(filePeriod) - defer func() { - pingTicker.Stop() - fileTicker.Stop() - ws.Close() - }() - for { - select { - case <-fileTicker.C: - var p []byte - var err error - - p, lastMod, err = readFileIfModified(lastMod) - - if err != nil { - if s := err.Error(); s != lastError { - lastError = s - p = []byte(lastError) - } - } else { - lastError = "" - } - - if p != nil { - ws.SetWriteDeadline(time.Now().Add(writeWait)) - if err := ws.WriteMessage(websocket.TextMessage, p); err != nil { - return - } - } - case <-pingTicker.C: - ws.SetWriteDeadline(time.Now().Add(writeWait)) - if err := ws.WriteMessage(websocket.PingMessage, []byte{}); err != nil { - return - } - } - } -} - -func serveWs(w http.ResponseWriter, r *http.Request) { - ws, err := upgrader.Upgrade(w, r, nil) - if err != nil { - if _, ok := err.(websocket.HandshakeError); !ok { - log.Println(err) - } - return - } - - var lastMod time.Time - if n, err := strconv.ParseInt(r.FormValue("lastMod"), 16, 64); err == nil { - lastMod = time.Unix(0, n) - } - - go writer(ws, lastMod) - reader(ws) -} - -func serveHome(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { - http.Error(w, "Not found", 404) - return - } - if r.Method != "GET" { - http.Error(w, "Method not allowed", 405) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - p, lastMod, err := readFileIfModified(time.Time{}) - if err != nil { - p = []byte(err.Error()) - lastMod = time.Unix(0, 0) - } - var v = struct { - Host string - Data string - LastMod string - }{ - r.Host, - string(p), - strconv.FormatInt(lastMod.UnixNano(), 16), - } - homeTempl.Execute(w, &v) -} - -func main() { - flag.Parse() - if flag.NArg() != 1 { - log.Fatal("filename not specified") - } - filename = flag.Args()[0] - http.HandleFunc("/", serveHome) - http.HandleFunc("/ws", serveWs) - if err := http.ListenAndServe(*addr, nil); err != nil { - log.Fatal(err) - } -} - -const homeHTML = `<!DOCTYPE html> -<html lang="en"> - <head> - <title>WebSocket Example</title> - </head> - <body> - <pre id="fileData">{{.Data}}</pre> - <script type="text/javascript"> - (function() { - var data = document.getElementById("fileData"); - var conn = new WebSocket("ws://{{.Host}}/ws?lastMod={{.LastMod}}"); - conn.onclose = function(evt) { - data.textContent = 'Connection 
closed'; - } - conn.onmessage = function(evt) { - console.log('file updated'); - data.textContent = evt.data; - } - })(); - </script> - </body> -</html> -` diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/json.go deleted file mode 100644 index 4f0e3687..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/json.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "encoding/json" - "io" -) - -// WriteJSON is deprecated, use c.WriteJSON instead. -func WriteJSON(c *Conn, v interface{}) error { - return c.WriteJSON(v) -} - -// WriteJSON writes the JSON encoding of v to the connection. -// -// See the documentation for encoding/json Marshal for details about the -// conversion of Go values to JSON. -func (c *Conn) WriteJSON(v interface{}) error { - w, err := c.NextWriter(TextMessage) - if err != nil { - return err - } - err1 := json.NewEncoder(w).Encode(v) - err2 := w.Close() - if err1 != nil { - return err1 - } - return err2 -} - -// ReadJSON is deprecated, use c.ReadJSON instead. -func ReadJSON(c *Conn, v interface{}) error { - return c.ReadJSON(v) -} - -// ReadJSON reads the next JSON-encoded message from the connection and stores -// it in the value pointed to by v. -// -// See the documentation for the encoding/json Unmarshal function for details -// about the conversion of JSON to a Go value. -func (c *Conn) ReadJSON(v interface{}) error { - _, r, err := c.NextReader() - if err != nil { - return err - } - err = json.NewDecoder(r).Decode(v) - if err == io.EOF { - // One value is expected in the message. - err = io.ErrUnexpectedEOF - } - return err -} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/mask.go deleted file mode 100644 index 6a88bbc7..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/mask.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of -// this source code is governed by a BSD-style license that can be found in the -// LICENSE file. - -// +build !appengine - -package websocket - -import "unsafe" - -const wordSize = int(unsafe.Sizeof(uintptr(0))) - -func maskBytes(key [4]byte, pos int, b []byte) int { - - // Mask one byte at a time for small buffers. - if len(b) < 2*wordSize { - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - return pos & 3 - } - - // Mask one byte at a time to word boundary. - if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { - n = wordSize - n - for i := range b[:n] { - b[i] ^= key[pos&3] - pos++ - } - b = b[n:] - } - - // Create aligned word size key. - var k [wordSize]byte - for i := range k { - k[i] = key[(pos+i)&3] - } - kw := *(*uintptr)(unsafe.Pointer(&k)) - - // Mask one word at a time. - n := (len(b) / wordSize) * wordSize - for i := 0; i < n; i += wordSize { - *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw - } - - // Mask one byte at a time for remaining bytes. 
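Stepping back to the json.go hunk above for a moment: ReadJSON and WriteJSON pair exactly one JSON value with one WebSocket message in each direction, so a typical handler loop looks like the sketch below. The Message type is an assumed example payload:

package example

import "github.com/gorilla/websocket"

// Message is an assumed example payload; any JSON-marshalable type works.
type Message struct {
	Kind string `json:"kind"`
	Body string `json:"body"`
}

// echoJSON uses the ReadJSON/WriteJSON helpers shown in json.go above.
func echoJSON(conn *websocket.Conn) error {
	for {
		var in Message
		if err := conn.ReadJSON(&in); err != nil {
			return err
		}
		if err := conn.WriteJSON(in); err != nil {
			return err
		}
	}
}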
- b = b[n:] - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - - return pos & 3 -} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/mask_safe.go deleted file mode 100644 index 2aac060e..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/mask_safe.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of -// this source code is governed by a BSD-style license that can be found in the -// LICENSE file. - -// +build appengine - -package websocket - -func maskBytes(key [4]byte, pos int, b []byte) int { - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - return pos & 3 -} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/prepared.go deleted file mode 100644 index 1efffbd1..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/prepared.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bytes" - "net" - "sync" - "time" -) - -// PreparedMessage caches on the wire representations of a message payload. -// Use PreparedMessage to efficiently send a message payload to multiple -// connections. PreparedMessage is especially useful when compression is used -// because the CPU and memory expensive compression operation can be executed -// once for a given set of compression options. -type PreparedMessage struct { - messageType int - data []byte - err error - mu sync.Mutex - frames map[prepareKey]*preparedFrame -} - -// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. -type prepareKey struct { - isServer bool - compress bool - compressionLevel int -} - -// preparedFrame contains data in wire representation. -type preparedFrame struct { - once sync.Once - data []byte -} - -// NewPreparedMessage returns an initialized PreparedMessage. You can then send -// it to connection using WritePreparedMessage method. Valid wire -// representation will be calculated lazily only once for a set of current -// connection options. -func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { - pm := &PreparedMessage{ - messageType: messageType, - frames: make(map[prepareKey]*preparedFrame), - data: data, - } - - // Prepare a plain server frame. - _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) - if err != nil { - return nil, err - } - - // To protect against caller modifying the data argument, remember the data - // copied to the plain server frame. - pm.data = frameData[len(frameData)-len(data):] - return pm, nil -} - -func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { - pm.mu.Lock() - frame, ok := pm.frames[key] - if !ok { - frame = &preparedFrame{} - pm.frames[key] = frame - } - pm.mu.Unlock() - - var err error - frame.once.Do(func() { - // Prepare a frame using a 'fake' connection. - // TODO: Refactor code in conn.go to allow more direct construction of - // the frame. 
- mu := make(chan bool, 1) - mu <- true - var nc prepareConn - c := &Conn{ - conn: &nc, - mu: mu, - isServer: key.isServer, - compressionLevel: key.compressionLevel, - enableWriteCompression: true, - writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), - } - if key.compress { - c.newCompressionWriter = compressNoContextTakeover - } - err = c.WriteMessage(pm.messageType, pm.data) - frame.data = nc.buf.Bytes() - }) - return pm.messageType, frame.data, err -} - -type prepareConn struct { - buf bytes.Buffer - net.Conn -} - -func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } -func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/server.go deleted file mode 100644 index 3495e0f1..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/server.go +++ /dev/null @@ -1,291 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bufio" - "errors" - "net" - "net/http" - "net/url" - "strings" - "time" -) - -// HandshakeError describes an error with the handshake from the peer. -type HandshakeError struct { - message string -} - -func (e HandshakeError) Error() string { return e.message } - -// Upgrader specifies parameters for upgrading an HTTP connection to a -// WebSocket connection. -type Upgrader struct { - // HandshakeTimeout specifies the duration for the handshake to complete. - HandshakeTimeout time.Duration - - // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer - // size is zero, then buffers allocated by the HTTP server are used. The - // I/O buffer sizes do not limit the size of the messages that can be sent - // or received. - ReadBufferSize, WriteBufferSize int - - // Subprotocols specifies the server's supported protocols in order of - // preference. If this field is set, then the Upgrade method negotiates a - // subprotocol by selecting the first match in this list with a protocol - // requested by the client. - Subprotocols []string - - // Error specifies the function for generating HTTP error responses. If Error - // is nil, then http.Error is used to generate the HTTP response. - Error func(w http.ResponseWriter, r *http.Request, status int, reason error) - - // CheckOrigin returns true if the request Origin header is acceptable. If - // CheckOrigin is nil, the host in the Origin header must not be set or - // must match the host of the request. - CheckOrigin func(r *http.Request) bool - - // EnableCompression specify if the server should attempt to negotiate per - // message compression (RFC 7692). Setting this value to true does not - // guarantee that compression will be supported. Currently only "no context - // takeover" modes are supported. - EnableCompression bool -} - -func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { - err := HandshakeError{reason} - if u.Error != nil { - u.Error(w, r, status, err) - } else { - w.Header().Set("Sec-Websocket-Version", "13") - http.Error(w, http.StatusText(status), status) - } - return nil, err -} - -// checkSameOrigin returns true if the origin is not set or is equal to the request host. 
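One more aside, on the prepared.go hunk above: PreparedMessage exists so that a broadcast pays the framing (and, if negotiated, compression) cost once per payload instead of once per connection. A hedged sketch of that pattern; the conns slice and the skip-on-error policy are assumptions of the example:

package example

import "github.com/gorilla/websocket"

// broadcast frames the payload once and writes the cached representation to
// every connection, which is the use case prepared.go describes.
func broadcast(conns []*websocket.Conn, payload []byte) error {
	pm, err := websocket.NewPreparedMessage(websocket.TextMessage, payload)
	if err != nil {
		return err
	}
	for _, c := range conns {
		if err := c.WritePreparedMessage(pm); err != nil {
			continue // illustrative: skip the failed connection and keep broadcasting
		}
	}
	return nil
}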
-func checkSameOrigin(r *http.Request) bool { - origin := r.Header["Origin"] - if len(origin) == 0 { - return true - } - u, err := url.Parse(origin[0]) - if err != nil { - return false - } - return u.Host == r.Host -} - -func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { - if u.Subprotocols != nil { - clientProtocols := Subprotocols(r) - for _, serverProtocol := range u.Subprotocols { - for _, clientProtocol := range clientProtocols { - if clientProtocol == serverProtocol { - return clientProtocol - } - } - } - } else if responseHeader != nil { - return responseHeader.Get("Sec-Websocket-Protocol") - } - return "" -} - -// Upgrade upgrades the HTTP server connection to the WebSocket protocol. -// -// The responseHeader is included in the response to the client's upgrade -// request. Use the responseHeader to specify cookies (Set-Cookie) and the -// application negotiated subprotocol (Sec-Websocket-Protocol). -// -// If the upgrade fails, then Upgrade replies to the client with an HTTP error -// response. -func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { - if r.Method != "GET" { - return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: not a websocket handshake: request method is not GET") - } - - if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { - return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-Websocket-Extensions' headers are unsupported") - } - - if !tokenListContainsValue(r.Header, "Connection", "upgrade") { - return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'upgrade' token not found in 'Connection' header") - } - - if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { - return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'websocket' token not found in 'Upgrade' header") - } - - if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { - return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") - } - - checkOrigin := u.CheckOrigin - if checkOrigin == nil { - checkOrigin = checkSameOrigin - } - if !checkOrigin(r) { - return u.returnError(w, r, http.StatusForbidden, "websocket: 'Origin' header value not allowed") - } - - challengeKey := r.Header.Get("Sec-Websocket-Key") - if challengeKey == "" { - return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-Websocket-Key' header is missing or blank") - } - - subprotocol := u.selectSubprotocol(r, responseHeader) - - // Negotiate PMCE - var compress bool - if u.EnableCompression { - for _, ext := range parseExtensions(r.Header) { - if ext[""] != "permessage-deflate" { - continue - } - compress = true - break - } - } - - var ( - netConn net.Conn - err error - ) - - h, ok := w.(http.Hijacker) - if !ok { - return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") - } - var brw *bufio.ReadWriter - netConn, brw, err = h.Hijack() - if err != nil { - return u.returnError(w, r, http.StatusInternalServerError, err.Error()) - } - - if brw.Reader.Buffered() > 0 { - netConn.Close() - return nil, errors.New("websocket: client sent data before handshake is complete") - } - - c := newConnBRW(netConn, true, u.ReadBufferSize, u.WriteBufferSize, brw) - c.subprotocol = subprotocol - - if compress { - c.newCompressionWriter = 
compressNoContextTakeover - c.newDecompressionReader = decompressNoContextTakeover - } - - p := c.writeBuf[:0] - p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) - p = append(p, computeAcceptKey(challengeKey)...) - p = append(p, "\r\n"...) - if c.subprotocol != "" { - p = append(p, "Sec-Websocket-Protocol: "...) - p = append(p, c.subprotocol...) - p = append(p, "\r\n"...) - } - if compress { - p = append(p, "Sec-Websocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) - } - for k, vs := range responseHeader { - if k == "Sec-Websocket-Protocol" { - continue - } - for _, v := range vs { - p = append(p, k...) - p = append(p, ": "...) - for i := 0; i < len(v); i++ { - b := v[i] - if b <= 31 { - // prevent response splitting. - b = ' ' - } - p = append(p, b) - } - p = append(p, "\r\n"...) - } - } - p = append(p, "\r\n"...) - - // Clear deadlines set by HTTP server. - netConn.SetDeadline(time.Time{}) - - if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) - } - if _, err = netConn.Write(p); err != nil { - netConn.Close() - return nil, err - } - if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Time{}) - } - - return c, nil -} - -// Upgrade upgrades the HTTP server connection to the WebSocket protocol. -// -// This function is deprecated, use websocket.Upgrader instead. -// -// The application is responsible for checking the request origin before -// calling Upgrade. An example implementation of the same origin policy is: -// -// if req.Header.Get("Origin") != "http://"+req.Host { -// http.Error(w, "Origin not allowed", 403) -// return -// } -// -// If the endpoint supports subprotocols, then the application is responsible -// for negotiating the protocol used on the connection. Use the Subprotocols() -// function to get the subprotocols requested by the client. Use the -// Sec-Websocket-Protocol response header to specify the subprotocol selected -// by the application. -// -// The responseHeader is included in the response to the client's upgrade -// request. Use the responseHeader to specify cookies (Set-Cookie) and the -// negotiated subprotocol (Sec-Websocket-Protocol). -// -// The connection buffers IO to the underlying network connection. The -// readBufSize and writeBufSize parameters specify the size of the buffers to -// use. Messages can be larger than the buffers. -// -// If the request is not a valid WebSocket handshake, then Upgrade returns an -// error of type HandshakeError. Applications should handle this error by -// replying to the client with an HTTP error response. -func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { - u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} - u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { - // don't return errors to maintain backwards compatibility - } - u.CheckOrigin = func(r *http.Request) bool { - // allow all connections by default - return true - } - return u.Upgrade(w, r, responseHeader) -} - -// Subprotocols returns the subprotocols requested by the client in the -// Sec-Websocket-Protocol header. 
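
The deprecated package-level Upgrade documented above leaves the origin check and subprotocol negotiation to the caller; below is a hedged sketch of that, using the Subprotocols helper whose definition follows. The "chat.v1" protocol name, package name and buffer sizes are invented.

package legacyws

import (
	"net/http"

	"github.com/gorilla/websocket"
)

// legacyHandler follows the deprecation note above: it checks the origin and
// negotiates a subprotocol itself before calling the package-level Upgrade.
func legacyHandler(w http.ResponseWriter, r *http.Request) {
	// Same-origin check, as the doc comment above suggests.
	if r.Header.Get("Origin") != "http://"+r.Host {
		http.Error(w, "Origin not allowed", http.StatusForbidden)
		return
	}

	// Pick the first client-requested subprotocol we recognise ("chat.v1" is
	// an assumed application protocol name).
	respHeader := http.Header{}
	for _, p := range websocket.Subprotocols(r) {
		if p == "chat.v1" {
			respHeader.Set("Sec-Websocket-Protocol", p)
			break
		}
	}

	conn, err := websocket.Upgrade(w, r, respHeader, 1024, 1024)
	if err != nil {
		// Note: this legacy path does not write an HTTP error response itself.
		return
	}
	defer conn.Close()
}
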
-func Subprotocols(r *http.Request) []string { - h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) - if h == "" { - return nil - } - protocols := strings.Split(h, ",") - for i := range protocols { - protocols[i] = strings.TrimSpace(protocols[i]) - } - return protocols -} - -// IsWebSocketUpgrade returns true if the client requested upgrade to the -// WebSocket protocol. -func IsWebSocketUpgrade(r *http.Request) bool { - return tokenListContainsValue(r.Header, "Connection", "upgrade") && - tokenListContainsValue(r.Header, "Upgrade", "websocket") -} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/util.go deleted file mode 100644 index 9a4908df..00000000 --- a/vendor/github.com/mattermost/platform/vendor/github.com/gorilla/websocket/util.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "crypto/rand" - "crypto/sha1" - "encoding/base64" - "io" - "net/http" - "strings" -) - -var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") - -func computeAcceptKey(challengeKey string) string { - h := sha1.New() - h.Write([]byte(challengeKey)) - h.Write(keyGUID) - return base64.StdEncoding.EncodeToString(h.Sum(nil)) -} - -func generateChallengeKey() (string, error) { - p := make([]byte, 16) - if _, err := io.ReadFull(rand.Reader, p); err != nil { - return "", err - } - return base64.StdEncoding.EncodeToString(p), nil -} - -// Octet types from RFC 2616. -var octetTypes [256]byte - -const ( - isTokenOctet = 1 << iota - isSpaceOctet -) - -func init() { - // From RFC 2616 - // - // OCTET = <any 8-bit sequence of data> - // CHAR = <any US-ASCII character (octets 0 - 127)> - // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)> - // CR = <US-ASCII CR, carriage return (13)> - // LF = <US-ASCII LF, linefeed (10)> - // SP = <US-ASCII SP, space (32)> - // HT = <US-ASCII HT, horizontal-tab (9)> - // <"> = <US-ASCII double-quote mark (34)> - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = <any OCTET except CTLs, but including LWS> - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" 
| "=" | "{" | "}" | SP | HT - // token = 1*<any CHAR except CTLs or separators> - // qdtext = <any TEXT except <">> - - for c := 0; c < 256; c++ { - var t byte - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { - t |= isSpaceOctet - } - if isChar && !isCtl && !isSeparator { - t |= isTokenOctet - } - octetTypes[c] = t - } -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpaceOctet == 0 { - break - } - } - return s[i:] -} - -func nextToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isTokenOctet == 0 { - break - } - } - return s[:i], s[i:] -} - -func nextTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return nextToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j += 1 - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j += 1 - } - } - return "", "" - } - } - return "", "" -} - -// tokenListContainsValue returns true if the 1#token header with the given -// name contains token. -func tokenListContainsValue(header http.Header, name string, value string) bool { -headers: - for _, s := range header[name] { - for { - var t string - t, s = nextToken(skipSpace(s)) - if t == "" { - continue headers - } - s = skipSpace(s) - if s != "" && s[0] != ',' { - continue headers - } - if strings.EqualFold(t, value) { - return true - } - if s == "" { - continue headers - } - s = s[1:] - } - } - return false -} - -// parseExtensiosn parses WebSocket extensions from a header. -func parseExtensions(header http.Header) []map[string]string { - - // From RFC 6455: - // - // Sec-WebSocket-Extensions = extension-list - // extension-list = 1#extension - // extension = extension-token *( ";" extension-param ) - // extension-token = registered-token - // registered-token = token - // extension-param = token [ "=" (token | quoted-string) ] - // ;When using the quoted-string syntax variant, the value - // ;after quoted-string unescaping MUST conform to the - // ;'token' ABNF. 
- - var result []map[string]string -headers: - for _, s := range header["Sec-Websocket-Extensions"] { - for { - var t string - t, s = nextToken(skipSpace(s)) - if t == "" { - continue headers - } - ext := map[string]string{"": t} - for { - s = skipSpace(s) - if !strings.HasPrefix(s, ";") { - break - } - var k string - k, s = nextToken(skipSpace(s[1:])) - if k == "" { - continue headers - } - s = skipSpace(s) - var v string - if strings.HasPrefix(s, "=") { - v, s = nextTokenOrQuoted(skipSpace(s[1:])) - s = skipSpace(s) - } - if s != "" && s[0] != ',' && s[0] != ';' { - continue headers - } - ext[k] = v - } - if s != "" && s[0] != ',' { - continue headers - } - result = append(result, ext) - if s == "" { - continue headers - } - s = s[1:] - } - } - return result -} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/main.go b/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/main.go index 5d6b6ad4..58971033 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/main.go +++ b/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/main.go @@ -81,9 +81,9 @@ var codeTemplate = template.Must(template.New("spec").Parse(`package language func init() { {{range .PluralGroups}} - registerPluralSpec({{printf "%#v" .SplitLocales}}, &PluralSpec{ + RegisterPluralSpec({{printf "%#v" .SplitLocales}}, &PluralSpec{ Plurals: newPluralSet({{range $i, $e := .PluralRules}}{{if $i}}, {{end}}{{$e.CountTitle}}{{end}}), - PluralFunc: func(ops *operands) Plural { {{range .PluralRules}}{{if .GoCondition}} + PluralFunc: func(ops *Operands) Plural { {{range .PluralRules}}{{if .GoCondition}} // {{.Condition}} if {{.GoCondition}} { return {{.CountTitle}} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/language.go b/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/language.go index 9a155efc..b045a275 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/language.go +++ b/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/language.go @@ -45,7 +45,7 @@ func Parse(src string) []*Language { switch chr { case ',', ';', '.': tag := strings.TrimSpace(src[start:end]) - if spec := getPluralSpec(tag); spec != nil { + if spec := GetPluralSpec(tag); spec != nil { langs = append(langs, &Language{NormalizeTag(tag), spec}) } start = end + 1 @@ -53,12 +53,12 @@ func Parse(src string) []*Language { } if start > 0 { tag := strings.TrimSpace(src[start:]) - if spec := getPluralSpec(tag); spec != nil { + if spec := GetPluralSpec(tag); spec != nil { langs = append(langs, &Language{NormalizeTag(tag), spec}) } return dedupe(langs) } - if spec := getPluralSpec(src); spec != nil { + if spec := GetPluralSpec(src); spec != nil { langs = append(langs, &Language{NormalizeTag(src), spec}) } return langs diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/operands.go b/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/operands.go index 877bcc89..49ee7dc7 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/operands.go +++ b/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/operands.go @@ -7,7 +7,7 @@ import 
( ) // http://unicode.org/reports/tr35/tr35-numbers.html#Operands -type operands struct { +type Operands struct { N float64 // absolute value of the source number (integer and decimals) I int64 // integer digits of n V int64 // number of visible fraction digits in n, with trailing zeros @@ -17,7 +17,7 @@ type operands struct { } // NmodEqualAny returns true if o represents an integer equal to any of the arguments. -func (o *operands) NequalsAny(any ...int64) bool { +func (o *Operands) NequalsAny(any ...int64) bool { for _, i := range any { if o.I == i && o.T == 0 { return true @@ -27,7 +27,7 @@ func (o *operands) NequalsAny(any ...int64) bool { } // NmodEqualAny returns true if o represents an integer equal to any of the arguments modulo mod. -func (o *operands) NmodEqualsAny(mod int64, any ...int64) bool { +func (o *Operands) NmodEqualsAny(mod int64, any ...int64) bool { modI := o.I % mod for _, i := range any { if modI == i && o.T == 0 { @@ -38,17 +38,17 @@ func (o *operands) NmodEqualsAny(mod int64, any ...int64) bool { } // NmodInRange returns true if o represents an integer in the closed interval [from, to]. -func (o *operands) NinRange(from, to int64) bool { +func (o *Operands) NinRange(from, to int64) bool { return o.T == 0 && from <= o.I && o.I <= to } // NmodInRange returns true if o represents an integer in the closed interval [from, to] modulo mod. -func (o *operands) NmodInRange(mod, from, to int64) bool { +func (o *Operands) NmodInRange(mod, from, to int64) bool { modI := o.I % mod return o.T == 0 && from <= modI && modI <= to } -func newOperands(v interface{}) (*operands, error) { +func newOperands(v interface{}) (*Operands, error) { switch v := v.(type) { case int: return newOperandsInt64(int64(v)), nil @@ -69,14 +69,14 @@ func newOperands(v interface{}) (*operands, error) { } } -func newOperandsInt64(i int64) *operands { +func newOperandsInt64(i int64) *Operands { if i < 0 { i = -i } - return &operands{float64(i), i, 0, 0, 0, 0} + return &Operands{float64(i), i, 0, 0, 0, 0} } -func newOperandsString(s string) (*operands, error) { +func newOperandsString(s string) (*Operands, error) { if s[0] == '-' { s = s[1:] } @@ -84,7 +84,7 @@ func newOperandsString(s string) (*operands, error) { if err != nil { return nil, err } - ops := &operands{N: n} + ops := &Operands{N: n} parts := strings.SplitN(s, ".", 2) ops.I, err = strconv.ParseInt(parts[0], 10, 64) if err != nil { diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec.go b/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec.go index fc352268..fc31e880 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec.go +++ b/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec.go @@ -7,7 +7,7 @@ import "strings" // http://unicode.org/reports/tr35/tr35-numbers.html#Operands type PluralSpec struct { Plurals map[Plural]struct{} - PluralFunc func(*operands) Plural + PluralFunc func(*Operands) Plural } var pluralSpecs = make(map[string]*PluralSpec) @@ -18,7 +18,8 @@ func normalizePluralSpecID(id string) string { return id } -func registerPluralSpec(ids []string, ps *PluralSpec) { +// RegisterPluralSpec registers a new plural spec for the language ids. 
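
The renames in these hunks (operands -> Operands, registerPluralSpec -> RegisterPluralSpec, getPluralSpec -> GetPluralSpec) expose the plural machinery to code outside the package. A hedged sketch of what that allows, using the RegisterPluralSpec function defined immediately below; the "x-demo" tag is invented, and the import uses the upstream package path rather than the vendored copy.

package main

import (
	"fmt"

	"github.com/nicksnyder/go-i18n/i18n/language"
)

func main() {
	// Register a toy spec for an invented tag: "one" for exactly 1, "other" otherwise.
	language.RegisterPluralSpec([]string{"x-demo"}, &language.PluralSpec{
		Plurals: map[language.Plural]struct{}{
			language.One:   {},
			language.Other: {},
		},
		PluralFunc: func(ops *language.Operands) language.Plural {
			if ops.NequalsAny(1) {
				return language.One
			}
			return language.Other
		},
	})

	// Look the spec back up and classify a couple of numbers.
	spec := language.GetPluralSpec("x-demo")
	if spec == nil {
		panic("spec not registered")
	}
	for _, n := range []interface{}{1, 2} {
		plural, err := spec.Plural(n)
		fmt.Println(n, plural, err)
	}
}
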
+func RegisterPluralSpec(ids []string, ps *PluralSpec) { for _, id := range ids { id = normalizePluralSpecID(id) pluralSpecs[id] = ps @@ -35,9 +36,9 @@ func (ps *PluralSpec) Plural(number interface{}) (Plural, error) { return ps.PluralFunc(ops), nil } -// getPluralSpec returns the PluralSpec that matches the longest prefix of tag. +// GetPluralSpec returns the PluralSpec that matches the longest prefix of tag. // It returns nil if no PluralSpec matches tag. -func getPluralSpec(tag string) *PluralSpec { +func GetPluralSpec(tag string) *PluralSpec { tag = NormalizeTag(tag) subtag := tag for { diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen.go b/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen.go index c9b4f266..0268bb92 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen.go +++ b/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen.go @@ -4,15 +4,15 @@ package language func init() { - registerPluralSpec([]string{"bm", "bo", "dz", "id", "ig", "ii", "in", "ja", "jbo", "jv", "jw", "kde", "kea", "km", "ko", "lkt", "lo", "ms", "my", "nqo", "root", "sah", "ses", "sg", "th", "to", "vi", "wo", "yo", "zh"}, &PluralSpec{ + RegisterPluralSpec([]string{"bm", "bo", "dz", "id", "ig", "ii", "in", "ja", "jbo", "jv", "jw", "kde", "kea", "km", "ko", "lkt", "lo", "ms", "my", "nqo", "root", "sah", "ses", "sg", "th", "to", "vi", "wo", "yo", "yue", "zh"}, &PluralSpec{ Plurals: newPluralSet(Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { return Other }, }) - registerPluralSpec([]string{"am", "as", "bn", "fa", "gu", "hi", "kn", "mr", "zu"}, &PluralSpec{ + RegisterPluralSpec([]string{"am", "as", "bn", "fa", "gu", "hi", "kn", "mr", "zu"}, &PluralSpec{ Plurals: newPluralSet(One, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // i = 0 or n = 1 if intEqualsAny(ops.I, 0) || ops.NequalsAny(1) { @@ -21,9 +21,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"ff", "fr", "hy", "kab"}, &PluralSpec{ + RegisterPluralSpec([]string{"ff", "fr", "hy", "kab"}, &PluralSpec{ Plurals: newPluralSet(One, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // i = 0,1 if intEqualsAny(ops.I, 0, 1) { return One @@ -31,9 +31,19 @@ func init() { return Other }, }) - registerPluralSpec([]string{"ast", "ca", "de", "en", "et", "fi", "fy", "gl", "it", "ji", "nl", "sv", "sw", "ur", "yi"}, &PluralSpec{ + RegisterPluralSpec([]string{"pt"}, &PluralSpec{ Plurals: newPluralSet(One, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { + // i = 0..1 + if intInRange(ops.I, 0, 1) { + return One + } + return Other + }, + }) + RegisterPluralSpec([]string{"ast", "ca", "de", "en", "et", "fi", "fy", "gl", "it", "ji", "nl", "sv", "sw", "ur", "yi"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *Operands) Plural { // i = 1 and v = 0 if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { return One @@ -41,9 +51,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"si"}, &PluralSpec{ + RegisterPluralSpec([]string{"si"}, &PluralSpec{ Plurals: newPluralSet(One, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // n = 0,1 or i = 0 and f = 1 if 
ops.NequalsAny(0, 1) || intEqualsAny(ops.I, 0) && intEqualsAny(ops.F, 1) { @@ -52,9 +62,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"ak", "bh", "guw", "ln", "mg", "nso", "pa", "ti", "wa"}, &PluralSpec{ + RegisterPluralSpec([]string{"ak", "bh", "guw", "ln", "mg", "nso", "pa", "ti", "wa"}, &PluralSpec{ Plurals: newPluralSet(One, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // n = 0..1 if ops.NinRange(0, 1) { return One @@ -62,9 +72,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"tzm"}, &PluralSpec{ + RegisterPluralSpec([]string{"tzm"}, &PluralSpec{ Plurals: newPluralSet(One, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // n = 0..1 or n = 11..99 if ops.NinRange(0, 1) || ops.NinRange(11, 99) { @@ -73,19 +83,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"pt"}, &PluralSpec{ - Plurals: newPluralSet(One, Other), - PluralFunc: func(ops *operands) Plural { - // n = 0..2 and n != 2 - if ops.NinRange(0, 2) && !ops.NequalsAny(2) { - return One - } - return Other - }, - }) - registerPluralSpec([]string{"af", "asa", "az", "bem", "bez", "bg", "brx", "ce", "cgg", "chr", "ckb", "dv", "ee", "el", "eo", "es", "eu", "fo", "fur", "gsw", "ha", "haw", "hu", "jgo", "jmc", "ka", "kaj", "kcg", "kk", "kkj", "kl", "ks", "ksb", "ku", "ky", "lb", "lg", "mas", "mgo", "ml", "mn", "nah", "nb", "nd", "ne", "nn", "nnh", "no", "nr", "ny", "nyn", "om", "or", "os", "pap", "ps", "rm", "rof", "rwk", "saq", "sdh", "seh", "sn", "so", "sq", "ss", "ssy", "st", "syr", "ta", "te", "teo", "tig", "tk", "tn", "tr", "ts", "ug", "uz", "ve", "vo", "vun", "wae", "xh", "xog"}, &PluralSpec{ + RegisterPluralSpec([]string{"af", "asa", "az", "bem", "bez", "bg", "brx", "ce", "cgg", "chr", "ckb", "dv", "ee", "el", "eo", "es", "eu", "fo", "fur", "gsw", "ha", "haw", "hu", "jgo", "jmc", "ka", "kaj", "kcg", "kk", "kkj", "kl", "ks", "ksb", "ku", "ky", "lb", "lg", "mas", "mgo", "ml", "mn", "nah", "nb", "nd", "ne", "nn", "nnh", "no", "nr", "ny", "nyn", "om", "or", "os", "pap", "ps", "rm", "rof", "rwk", "saq", "sdh", "seh", "sn", "so", "sq", "ss", "ssy", "st", "syr", "ta", "te", "teo", "tig", "tk", "tn", "tr", "ts", "ug", "uz", "ve", "vo", "vun", "wae", "xh", "xog"}, &PluralSpec{ Plurals: newPluralSet(One, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // n = 1 if ops.NequalsAny(1) { return One @@ -93,19 +93,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"pt_PT"}, &PluralSpec{ - Plurals: newPluralSet(One, Other), - PluralFunc: func(ops *operands) Plural { - // n = 1 and v = 0 - if ops.NequalsAny(1) && intEqualsAny(ops.V, 0) { - return One - } - return Other - }, - }) - registerPluralSpec([]string{"da"}, &PluralSpec{ + RegisterPluralSpec([]string{"da"}, &PluralSpec{ Plurals: newPluralSet(One, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // n = 1 or t != 0 and i = 0,1 if ops.NequalsAny(1) || !intEqualsAny(ops.T, 0) && intEqualsAny(ops.I, 0, 1) { @@ -114,9 +104,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"is"}, &PluralSpec{ + RegisterPluralSpec([]string{"is"}, &PluralSpec{ Plurals: newPluralSet(One, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // t = 0 and i % 10 = 1 and i % 100 != 11 or t != 0 if intEqualsAny(ops.T, 0) && intEqualsAny(ops.I%10, 1) && !intEqualsAny(ops.I%100, 11) || !intEqualsAny(ops.T, 0) { @@ 
-125,9 +115,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"mk"}, &PluralSpec{ + RegisterPluralSpec([]string{"mk"}, &PluralSpec{ Plurals: newPluralSet(One, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // v = 0 and i % 10 = 1 or f % 10 = 1 if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 1) || intEqualsAny(ops.F%10, 1) { @@ -136,9 +126,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"fil", "tl"}, &PluralSpec{ + RegisterPluralSpec([]string{"fil", "tl"}, &PluralSpec{ Plurals: newPluralSet(One, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // v = 0 and i = 1,2,3 or v = 0 and i % 10 != 4,6,9 or v != 0 and f % 10 != 4,6,9 if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I, 1, 2, 3) || intEqualsAny(ops.V, 0) && !intEqualsAny(ops.I%10, 4, 6, 9) || @@ -148,9 +138,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"lv", "prg"}, &PluralSpec{ + RegisterPluralSpec([]string{"lv", "prg"}, &PluralSpec{ Plurals: newPluralSet(Zero, One, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // n % 10 = 0 or n % 100 = 11..19 or v = 2 and f % 100 = 11..19 if ops.NmodEqualsAny(10, 0) || ops.NmodInRange(100, 11, 19) || @@ -166,9 +156,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"lag"}, &PluralSpec{ + RegisterPluralSpec([]string{"lag"}, &PluralSpec{ Plurals: newPluralSet(Zero, One, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // n = 0 if ops.NequalsAny(0) { return Zero @@ -180,9 +170,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"ksh"}, &PluralSpec{ + RegisterPluralSpec([]string{"ksh"}, &PluralSpec{ Plurals: newPluralSet(Zero, One, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // n = 0 if ops.NequalsAny(0) { return Zero @@ -194,9 +184,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"iu", "kw", "naq", "se", "sma", "smi", "smj", "smn", "sms"}, &PluralSpec{ + RegisterPluralSpec([]string{"iu", "kw", "naq", "se", "sma", "smi", "smj", "smn", "sms"}, &PluralSpec{ Plurals: newPluralSet(One, Two, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // n = 1 if ops.NequalsAny(1) { return One @@ -208,9 +198,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"shi"}, &PluralSpec{ + RegisterPluralSpec([]string{"shi"}, &PluralSpec{ Plurals: newPluralSet(One, Few, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // i = 0 or n = 1 if intEqualsAny(ops.I, 0) || ops.NequalsAny(1) { @@ -223,9 +213,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"mo", "ro"}, &PluralSpec{ + RegisterPluralSpec([]string{"mo", "ro"}, &PluralSpec{ Plurals: newPluralSet(One, Few, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // i = 1 and v = 0 if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { return One @@ -239,9 +229,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"bs", "hr", "sh", "sr"}, &PluralSpec{ + RegisterPluralSpec([]string{"bs", "hr", "sh", "sr"}, &PluralSpec{ Plurals: newPluralSet(One, Few, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11 if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 
1) && !intEqualsAny(ops.I%100, 11) || intEqualsAny(ops.F%10, 1) && !intEqualsAny(ops.F%100, 11) { @@ -255,9 +245,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"gd"}, &PluralSpec{ + RegisterPluralSpec([]string{"gd"}, &PluralSpec{ Plurals: newPluralSet(One, Two, Few, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // n = 1,11 if ops.NequalsAny(1, 11) { return One @@ -273,9 +263,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"sl"}, &PluralSpec{ + RegisterPluralSpec([]string{"sl"}, &PluralSpec{ Plurals: newPluralSet(One, Two, Few, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // v = 0 and i % 100 = 1 if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%100, 1) { return One @@ -292,9 +282,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"dsb", "hsb"}, &PluralSpec{ + RegisterPluralSpec([]string{"dsb", "hsb"}, &PluralSpec{ Plurals: newPluralSet(One, Two, Few, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // v = 0 and i % 100 = 1 or f % 100 = 1 if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%100, 1) || intEqualsAny(ops.F%100, 1) { @@ -313,9 +303,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"he", "iw"}, &PluralSpec{ + RegisterPluralSpec([]string{"he", "iw"}, &PluralSpec{ Plurals: newPluralSet(One, Two, Many, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // i = 1 and v = 0 if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { return One @@ -331,9 +321,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"cs", "sk"}, &PluralSpec{ + RegisterPluralSpec([]string{"cs", "sk"}, &PluralSpec{ Plurals: newPluralSet(One, Few, Many, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // i = 1 and v = 0 if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { return One @@ -349,9 +339,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"pl"}, &PluralSpec{ + RegisterPluralSpec([]string{"pl"}, &PluralSpec{ Plurals: newPluralSet(One, Few, Many, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // i = 1 and v = 0 if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { return One @@ -369,9 +359,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"be"}, &PluralSpec{ + RegisterPluralSpec([]string{"be"}, &PluralSpec{ Plurals: newPluralSet(One, Few, Many, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // n % 10 = 1 and n % 100 != 11 if ops.NmodEqualsAny(10, 1) && !ops.NmodEqualsAny(100, 11) { return One @@ -389,9 +379,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"lt"}, &PluralSpec{ + RegisterPluralSpec([]string{"lt"}, &PluralSpec{ Plurals: newPluralSet(One, Few, Many, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // n % 10 = 1 and n % 100 != 11..19 if ops.NmodEqualsAny(10, 1) && !ops.NmodInRange(100, 11, 19) { return One @@ -407,9 +397,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"mt"}, &PluralSpec{ + RegisterPluralSpec([]string{"mt"}, &PluralSpec{ Plurals: newPluralSet(One, Few, Many, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // n = 1 if ops.NequalsAny(1) { return One @@ -426,9 +416,9 @@ func init() { return Other }, }) - 
registerPluralSpec([]string{"ru", "uk"}, &PluralSpec{ + RegisterPluralSpec([]string{"ru", "uk"}, &PluralSpec{ Plurals: newPluralSet(One, Few, Many, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // v = 0 and i % 10 = 1 and i % 100 != 11 if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 1) && !intEqualsAny(ops.I%100, 11) { return One @@ -446,9 +436,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"br"}, &PluralSpec{ + RegisterPluralSpec([]string{"br"}, &PluralSpec{ Plurals: newPluralSet(One, Two, Few, Many, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // n % 10 = 1 and n % 100 != 11,71,91 if ops.NmodEqualsAny(10, 1) && !ops.NmodEqualsAny(100, 11, 71, 91) { return One @@ -468,9 +458,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"ga"}, &PluralSpec{ + RegisterPluralSpec([]string{"ga"}, &PluralSpec{ Plurals: newPluralSet(One, Two, Few, Many, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // n = 1 if ops.NequalsAny(1) { return One @@ -490,9 +480,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"gv"}, &PluralSpec{ + RegisterPluralSpec([]string{"gv"}, &PluralSpec{ Plurals: newPluralSet(One, Two, Few, Many, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // v = 0 and i % 10 = 1 if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 1) { return One @@ -512,9 +502,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"ar"}, &PluralSpec{ + RegisterPluralSpec([]string{"ar", "ars"}, &PluralSpec{ Plurals: newPluralSet(Zero, One, Two, Few, Many, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // n = 0 if ops.NequalsAny(0) { return Zero @@ -538,9 +528,9 @@ func init() { return Other }, }) - registerPluralSpec([]string{"cy"}, &PluralSpec{ + RegisterPluralSpec([]string{"cy"}, &PluralSpec{ Plurals: newPluralSet(Zero, One, Two, Few, Many, Other), - PluralFunc: func(ops *operands) Plural { + PluralFunc: func(ops *Operands) Plural { // n = 0 if ops.NequalsAny(0) { return Zero diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/translation/plural_translation.go b/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/translation/plural_translation.go index 5dd74b2f..17c32609 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/translation/plural_translation.go +++ b/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/translation/plural_translation.go @@ -50,7 +50,7 @@ func (pt *pluralTranslation) Normalize(l *language.Language) Translation { func (pt *pluralTranslation) Backfill(src Translation) Translation { for pc, t := range pt.templates { - if t == nil || t.src == "" { + if (t == nil || t.src == "") && src != nil { pt.templates[pc] = src.Template(language.Other) } } diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/translation/single_translation.go b/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/translation/single_translation.go index 9fcba5a1..a76c8c94 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/translation/single_translation.go +++ b/vendor/github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n/translation/single_translation.go @@ 
-37,7 +37,7 @@ func (st *singleTranslation) Normalize(language *language.Language) Translation } func (st *singleTranslation) Backfill(src Translation) Translation { - if st.template == nil || st.template.src == "" { + if (st.template == nil || st.template.src == "") && src != nil { st.template = src.Template(language.Other) } return st diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/doc.go index 3c89619e..d5fd98c0 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/doc.go +++ b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/doc.go @@ -17,7 +17,7 @@ // JSONPath-like queries // // The package github.com/pelletier/go-toml/query implements a system -// similar to JSONPath to quickly retrive elements of a TOML document using a +// similar to JSONPath to quickly retrieve elements of a TOML document using a // single expression. See the package documentation for more information. // package toml diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/fuzz.go new file mode 100644 index 00000000..14570c8d --- /dev/null +++ b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/fuzz.go @@ -0,0 +1,31 @@ +// +build gofuzz + +package toml + +func Fuzz(data []byte) int { + tree, err := LoadBytes(data) + if err != nil { + if tree != nil { + panic("tree must be nil if there is an error") + } + return 0 + } + + str, err := tree.ToTomlString() + if err != nil { + if str != "" { + panic(`str must be "" if there is an error`) + } + panic(err) + } + + tree, err = Load(str) + if err != nil { + if tree != nil { + panic("tree must be nil if there is an error") + } + return 0 + } + + return 1 +} diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/keysparsing.go index d62ca5fd..9707c688 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/keysparsing.go +++ b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/keysparsing.go @@ -6,15 +6,37 @@ import ( "bytes" "errors" "fmt" + "strconv" "unicode" ) +var escapeSequenceMap = map[rune]rune{ + 'b': '\b', + 't': '\t', + 'n': '\n', + 'f': '\f', + 'r': '\r', + '"': '"', + '\\': '\\', +} + +type parseKeyState int + +const ( + BARE parseKeyState = iota + BASIC + LITERAL + ESC + UNICODE_4 + UNICODE_8 +) + func parseKey(key string) ([]string, error) { groups := []string{} var buffer bytes.Buffer - inQuotes := false + var hex bytes.Buffer + state := BARE wasInQuotes := false - escapeNext := false ignoreSpace := true expectDot := false @@ -25,25 +47,67 @@ func parseKey(key string) ([]string, error) { } ignoreSpace = false } - if escapeNext { - buffer.WriteRune(char) - escapeNext = false + + if state == ESC { + if char == 'u' { + state = UNICODE_4 + hex.Reset() + } else if char == 'U' { + state = UNICODE_8 + hex.Reset() + } else if newChar, ok := escapeSequenceMap[char]; ok { + buffer.WriteRune(newChar) + state = BASIC + } else { + return nil, fmt.Errorf(`invalid escape sequence \%c`, char) + } + continue + } + + if state == UNICODE_4 || state == UNICODE_8 { + if isHexDigit(char) { + hex.WriteRune(char) + } + if (state == UNICODE_4 && hex.Len() == 4) || (state == UNICODE_8 && hex.Len() 
== 8) { + if value, err := strconv.ParseInt(hex.String(), 16, 32); err == nil { + buffer.WriteRune(rune(value)) + } else { + return nil, err + } + state = BASIC + } continue } + switch char { case '\\': - escapeNext = true - continue + if state == BASIC { + state = ESC + } else if state == LITERAL { + buffer.WriteRune(char) + } + case '\'': + if state == BARE { + state = LITERAL + } else if state == LITERAL { + groups = append(groups, buffer.String()) + buffer.Reset() + wasInQuotes = true + state = BARE + } + expectDot = false case '"': - if inQuotes { + if state == BARE { + state = BASIC + } else if state == BASIC { groups = append(groups, buffer.String()) buffer.Reset() + state = BARE wasInQuotes = true } - inQuotes = !inQuotes expectDot = false case '.': - if inQuotes { + if state != BARE { buffer.WriteRune(char) } else { if !wasInQuotes { @@ -58,28 +122,31 @@ func parseKey(key string) ([]string, error) { wasInQuotes = false } case ' ': - if inQuotes { + if state == BASIC { buffer.WriteRune(char) } else { expectDot = true } default: - if !inQuotes && !isValidBareChar(char) { - return nil, fmt.Errorf("invalid bare character: %c", char) - } - if !inQuotes && expectDot { - return nil, errors.New("what?") + if state == BARE { + if !isValidBareChar(char) { + return nil, fmt.Errorf("invalid bare character: %c", char) + } else if expectDot { + return nil, errors.New("what?") + } } buffer.WriteRune(char) expectDot = false } } - if inQuotes { - return nil, errors.New("mismatched quotes") - } - if escapeNext { + + // state must be BARE at the end + if state == ESC { return nil, errors.New("unfinished escape sequence") + } else if state != BARE { + return nil, errors.New("mismatched quotes") } + if buffer.Len() > 0 { groups = append(groups, buffer.String()) } diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/marshal.go index 1a3176f9..1bbdfa1d 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/marshal.go +++ b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/marshal.go @@ -4,17 +4,29 @@ import ( "bytes" "errors" "fmt" + "io" "reflect" + "strconv" "strings" "time" ) type tomlOpts struct { name string + comment string + commented bool include bool omitempty bool } +type encOpts struct { + quoteMapKeys bool +} + +var encOptsDefaults = encOpts{ + quoteMapKeys: false, +} + var timeType = reflect.TypeOf(time.Time{}) var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() @@ -94,8 +106,15 @@ encoder, except that there is no concept of a Marshaler interface or MarshalTOML function for sub-structs, and currently only definite types can be marshaled (i.e. no `interface{}`). +The following struct annotations are supported: + + toml:"Field" Overrides the field's name to output. + omitempty When set, empty values and groups are not emitted. + comment:"comment" Emits a # comment on the same line. This supports new lines. + commented:"true" Emits the value as commented. + Note that pointers are automatically assigned the "omitempty" option, as TOML -explicity does not handle null values (saying instead the label should be +explicitly does not handle null values (saying instead the label should be dropped). 
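
Before the doc comment's type-mapping tables continue below, a hedged illustration of the struct annotations just listed; the Config type, its tags and the indicated output are invented, not taken from this change.

package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

// Config is an invented type showing the tags documented above.
type Config struct {
	Name  string `toml:"name" comment:"Display name shown in the UI."`
	Debug bool   `toml:"debug" commented:"true" comment:"Uncomment to enable verbose logging."`
	Token string `toml:"token,omitempty"` // dropped from the output when empty
}

func main() {
	out, err := toml.Marshal(Config{Name: "demo", Debug: true})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// The output contains, roughly (key order and spacing may differ):
	//   # Uncomment to enable verbose logging.
	//   # debug = true
	//   # Display name shown in the UI.
	//   name = "demo"
}
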
Tree structural types and corresponding marshal types: @@ -115,6 +134,47 @@ Tree primitive types and corresponding marshal types: time.Time time.Time{}, pointers to same */ func Marshal(v interface{}) ([]byte, error) { + return NewEncoder(nil).marshal(v) +} + +// Encoder writes TOML values to an output stream. +type Encoder struct { + w io.Writer + encOpts +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + encOpts: encOptsDefaults, + } +} + +// Encode writes the TOML encoding of v to the stream. +// +// See the documentation for Marshal for details. +func (e *Encoder) Encode(v interface{}) error { + b, err := e.marshal(v) + if err != nil { + return err + } + if _, err := e.w.Write(b); err != nil { + return err + } + return nil +} + +// QuoteMapKeys sets up the encoder to encode +// maps with string type keys with quoted TOML keys. +// +// This relieves the character limitations on map keys. +func (e *Encoder) QuoteMapKeys(v bool) *Encoder { + e.quoteMapKeys = v + return e +} + +func (e *Encoder) marshal(v interface{}) ([]byte, error) { mtype := reflect.TypeOf(v) if mtype.Kind() != reflect.Struct { return []byte{}, errors.New("Only a struct can be marshaled to TOML") @@ -123,7 +183,7 @@ func Marshal(v interface{}) ([]byte, error) { if isCustomMarshaler(mtype) { return callCustomMarshaler(sval) } - t, err := valueToTree(mtype, sval) + t, err := e.valueToTree(mtype, sval) if err != nil { return []byte{}, err } @@ -132,9 +192,9 @@ func Marshal(v interface{}) ([]byte, error) { } // Convert given marshal struct or map value to toml tree -func valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { +func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { if mtype.Kind() == reflect.Ptr { - return valueToTree(mtype.Elem(), mval.Elem()) + return e.valueToTree(mtype.Elem(), mval.Elem()) } tval := newTree() switch mtype.Kind() { @@ -143,31 +203,39 @@ func valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { mtypef, mvalf := mtype.Field(i), mval.Field(i) opts := tomlOptions(mtypef) if opts.include && (!opts.omitempty || !isZero(mvalf)) { - val, err := valueToToml(mtypef.Type, mvalf) + val, err := e.valueToToml(mtypef.Type, mvalf) if err != nil { return nil, err } - tval.Set(opts.name, val) + tval.Set(opts.name, opts.comment, opts.commented, val) } } case reflect.Map: for _, key := range mval.MapKeys() { mvalf := mval.MapIndex(key) - val, err := valueToToml(mtype.Elem(), mvalf) + val, err := e.valueToToml(mtype.Elem(), mvalf) if err != nil { return nil, err } - tval.Set(key.String(), val) + if e.quoteMapKeys { + keyStr, err := tomlValueStringRepresentation(key.String()) + if err != nil { + return nil, err + } + tval.SetPath([]string{keyStr}, "", false, val) + } else { + tval.Set(key.String(), "", false, val) + } } } return tval, nil } // Convert given marshal slice to slice of Toml trees -func valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { +func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { tval := make([]*Tree, mval.Len(), mval.Len()) for i := 0; i < mval.Len(); i++ { - val, err := valueToTree(mtype.Elem(), mval.Index(i)) + val, err := e.valueToTree(mtype.Elem(), mval.Index(i)) if err != nil { return nil, err } @@ -177,10 +245,10 @@ func valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { } // Convert given marshal slice to slice of toml values -func 
valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { +func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { tval := make([]interface{}, mval.Len(), mval.Len()) for i := 0; i < mval.Len(); i++ { - val, err := valueToToml(mtype.Elem(), mval.Index(i)) + val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) if err != nil { return nil, err } @@ -190,19 +258,19 @@ func valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, err } // Convert given marshal value to toml value -func valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { +func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { if mtype.Kind() == reflect.Ptr { - return valueToToml(mtype.Elem(), mval.Elem()) + return e.valueToToml(mtype.Elem(), mval.Elem()) } switch { case isCustomMarshaler(mtype): return callCustomMarshaler(mval) case isTree(mtype): - return valueToTree(mtype, mval) + return e.valueToTree(mtype, mval) case isTreeSlice(mtype): - return valueToTreeSlice(mtype, mval) + return e.valueToTreeSlice(mtype, mval) case isOtherSlice(mtype): - return valueToOtherSlice(mtype, mval) + return e.valueToOtherSlice(mtype, mval) default: switch mtype.Kind() { case reflect.Bool: @@ -227,17 +295,16 @@ func valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { // Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for // sub-structs, and only definite types can be unmarshaled. func (t *Tree) Unmarshal(v interface{}) error { - mtype := reflect.TypeOf(v) - if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct { - return errors.New("Only a pointer to struct can be unmarshaled from TOML") - } + d := Decoder{tval: t} + return d.unmarshal(v) +} - sval, err := valueFromTree(mtype.Elem(), t) - if err != nil { - return err - } - reflect.ValueOf(v).Elem().Set(sval) - return nil +// Marshal returns the TOML encoding of Tree. +// See Marshal() documentation for types mapping table. +func (t *Tree) Marshal() ([]byte, error) { + var buf bytes.Buffer + err := NewEncoder(&buf).Encode(t) + return buf.Bytes(), err } // Unmarshal parses the TOML-encoded data and stores the result in the value @@ -246,6 +313,10 @@ func (t *Tree) Unmarshal(v interface{}) error { // sub-structs, and currently only definite types can be unmarshaled to (i.e. no // `interface{}`). // +// The following struct annotations are supported: +// +// toml:"Field" Overrides the field's name to map to. +// // See Marshal() documentation for types mapping table. func Unmarshal(data []byte, v interface{}) error { t, err := LoadReader(bytes.NewReader(data)) @@ -255,10 +326,52 @@ func Unmarshal(data []byte, v interface{}) error { return t.Unmarshal(v) } +// Decoder reads and decodes TOML values from an input stream. +type Decoder struct { + r io.Reader + tval *Tree + encOpts +} + +// NewDecoder returns a new decoder that reads from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + r: r, + encOpts: encOptsDefaults, + } +} + +// Decode reads a TOML-encoded value from it's input +// and unmarshals it in the value pointed at by v. +// +// See the documentation for Marshal for details. 
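
A hedged round-trip sketch of the streaming API added in these hunks (NewEncoder/Encode with QuoteMapKeys, plus the NewDecoder/Decode pair whose definition follows); the Service type and its map contents are invented.

package main

import (
	"bytes"
	"fmt"

	"github.com/pelletier/go-toml"
)

type Service struct {
	Name  string           `toml:"name"`
	Ports map[string]int64 `toml:"ports"`
}

func main() {
	in := Service{
		Name: "gateway",
		// "http/alt" is not a valid bare TOML key, so QuoteMapKeys matters here.
		Ports: map[string]int64{"http": 80, "http/alt": 8080},
	}

	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).QuoteMapKeys(true).Encode(in); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())

	var out Service
	if err := toml.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}
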
+func (d *Decoder) Decode(v interface{}) error { + var err error + d.tval, err = LoadReader(d.r) + if err != nil { + return err + } + return d.unmarshal(v) +} + +func (d *Decoder) unmarshal(v interface{}) error { + mtype := reflect.TypeOf(v) + if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct { + return errors.New("Only a pointer to struct can be unmarshaled from TOML") + } + + sval, err := d.valueFromTree(mtype.Elem(), d.tval) + if err != nil { + return err + } + reflect.ValueOf(v).Elem().Set(sval) + return nil +} + // Convert toml tree to marshal struct or map, using marshal type -func valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) { +func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) { if mtype.Kind() == reflect.Ptr { - return unwrapPointer(mtype, tval) + return d.unwrapPointer(mtype, tval) } var mval reflect.Value switch mtype.Kind() { @@ -276,7 +389,7 @@ func valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) { continue } val := tval.Get(key) - mvalf, err := valueFromToml(mtypef.Type, val) + mvalf, err := d.valueFromToml(mtypef.Type, val) if err != nil { return mval, formatError(err, tval.GetPosition(key)) } @@ -288,8 +401,9 @@ func valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) { case reflect.Map: mval = reflect.MakeMap(mtype) for _, key := range tval.Keys() { - val := tval.Get(key) - mvalf, err := valueFromToml(mtype.Elem(), val) + // TODO: path splits key + val := tval.GetPath([]string{key}) + mvalf, err := d.valueFromToml(mtype.Elem(), val) if err != nil { return mval, formatError(err, tval.GetPosition(key)) } @@ -300,10 +414,10 @@ func valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) { } // Convert toml value to marshal struct/map slice, using marshal type -func valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { +func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { mval := reflect.MakeSlice(mtype, len(tval), len(tval)) for i := 0; i < len(tval); i++ { - val, err := valueFromTree(mtype.Elem(), tval[i]) + val, err := d.valueFromTree(mtype.Elem(), tval[i]) if err != nil { return mval, err } @@ -313,10 +427,10 @@ func valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) } // Convert toml value to marshal primitive slice, using marshal type -func valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { +func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { mval := reflect.MakeSlice(mtype, len(tval), len(tval)) for i := 0; i < len(tval); i++ { - val, err := valueFromToml(mtype.Elem(), tval[i]) + val, err := d.valueFromToml(mtype.Elem(), tval[i]) if err != nil { return mval, err } @@ -326,17 +440,30 @@ func valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, } // Convert toml value to marshal value, using marshal type -func valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) { +func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) { if mtype.Kind() == reflect.Ptr { - return unwrapPointer(mtype, tval) + return d.unwrapPointer(mtype, tval) } - switch { - case isTree(mtype): - return valueFromTree(mtype, tval.(*Tree)) - case isTreeSlice(mtype): - return valueFromTreeSlice(mtype, tval.([]*Tree)) - case isOtherSlice(mtype): - return valueFromOtherSlice(mtype, tval.([]interface{})) + + switch tval.(type) 
{ + case *Tree: + if isTree(mtype) { + return d.valueFromTree(mtype, tval.(*Tree)) + } else { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) + } + case []*Tree: + if isTreeSlice(mtype) { + return d.valueFromTreeSlice(mtype, tval.([]*Tree)) + } else { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) + } + case []interface{}: + if isOtherSlice(mtype) { + return d.valueFromOtherSlice(mtype, tval.([]interface{})) + } else { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) + } default: switch mtype.Kind() { case reflect.Bool: @@ -430,13 +557,13 @@ func valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) } return reflect.ValueOf(val), nil default: - return reflect.ValueOf(nil), fmt.Errorf("Unmarshal can't handle %v(%v)", mtype, mtype.Kind()) + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) } } } -func unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) { - val, err := valueFromToml(mtype.Elem(), tval) +func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) { + val, err := d.valueFromToml(mtype.Elem(), tval) if err != nil { return reflect.ValueOf(nil), err } @@ -448,7 +575,12 @@ func unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) func tomlOptions(vf reflect.StructField) tomlOpts { tag := vf.Tag.Get("toml") parse := strings.Split(tag, ",") - result := tomlOpts{vf.Name, true, false} + var comment string + if c := vf.Tag.Get("comment"); c != "" { + comment = c + } + commented, _ := strconv.ParseBool(vf.Tag.Get("commented")) + result := tomlOpts{name: vf.Name, comment: comment, commented: commented, include: true, omitempty: false} if parse[0] != "" { if parse[0] == "-" && len(parse) == 1 { result.include = false diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/parser.go index 8ee49cb5..d492a1e6 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/parser.go +++ b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/parser.go @@ -110,7 +110,7 @@ func (p *tomlParser) parseGroupArray() tomlParserStateFn { newTree := newTree() newTree.position = startToken.Position array = append(array, newTree) - p.tree.SetPath(p.currentTable, array) + p.tree.SetPath(p.currentTable, "", false, array) // remove all keys that were children of this table array prefix := key.val + "." 
@@ -205,7 +205,7 @@ func (p *tomlParser) parseAssign() tomlParserStateFn { case *Tree, []*Tree: toInsert = value default: - toInsert = &tomlValue{value, key.Position} + toInsert = &tomlValue{value: value, position: key.Position} } targetNode.values[keyVal] = toInsert return p.parseStart @@ -299,7 +299,7 @@ Loop: key := p.getToken() p.assume(tokenEqual) value := p.parseRvalue() - tree.Set(key.val, value) + tree.Set(key.val, "", false, value) case tokenComma: if previous == nil { p.raiseError(follow, "inline table cannot start with a comma") diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/query/doc.go b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/query/doc.go index f999fc96..ed63c110 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/query/doc.go +++ b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/query/doc.go @@ -139,7 +139,7 @@ // Compiled Queries // // Queries may be executed directly on a Tree object, or compiled ahead -// of time and executed discretely. The former is more convienent, but has the +// of time and executed discretely. The former is more convenient, but has the // penalty of having to recompile the query expression each time. // // // basic query diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/query/parser.go b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/query/parser.go index e4f91b97..5f69b70d 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/query/parser.go +++ b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/query/parser.go @@ -253,7 +253,7 @@ func (p *queryParser) parseFilterExpr() queryParserStateFn { } tok = p.getToken() if tok.typ != tokenKey && tok.typ != tokenString { - return p.parseError(tok, "expected key or string for filter funciton name") + return p.parseError(tok, "expected key or string for filter function name") } name := tok.val tok = p.getToken() diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/toml.go index 64f19ed3..c3e32437 100644 --- a/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/toml.go +++ b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/toml.go @@ -11,14 +11,18 @@ import ( ) type tomlValue struct { - value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list - position Position + value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list + comment string + commented bool + position Position } // Tree is the result of the parsing of a TOML file. type Tree struct { - values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree - position Position + values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree + comment string + commented bool + position Position } func newTree() *Tree { @@ -177,14 +181,14 @@ func (t *Tree) GetDefault(key string, def interface{}) interface{} { // Set an element in the tree. // Key is a dot-separated path (e.g. a.b.c). // Creates all necessary intermediate trees, if needed. 
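
A hedged sketch of the widened Set signature whose diff follows, i.e. Set(key, comment, commented, value) as it exists at the revision vendored here; the tree contents are invented, and loading a tiny document is just one convenient way to obtain a *Tree to mutate.

package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load("[server]\n")
	if err != nil {
		panic(err)
	}

	// New signature: Set(key, comment, commented, value).
	tree.Set("server.host", "Address the daemon binds to.", false, "127.0.0.1")
	tree.Set("server.port", "", false, int64(8080))
	tree.Set("server.tls", "Enable once certificates are in place.", true, true)

	out, err := tree.ToTomlString()
	if err != nil {
		panic(err)
	}
	fmt.Print(out)
}
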
-func (t *Tree) Set(key string, value interface{}) {
-	t.SetPath(strings.Split(key, "."), value)
+func (t *Tree) Set(key string, comment string, commented bool, value interface{}) {
+	t.SetPath(strings.Split(key, "."), comment, commented, value)
 }
 
 // SetPath sets an element in the tree.
 // Keys is an array of path elements (e.g. {"a","b","c"}).
 // Creates all necessary intermediate trees, if needed.
-func (t *Tree) SetPath(keys []string, value interface{}) {
+func (t *Tree) SetPath(keys []string, comment string, commented bool, value interface{}) {
 	subtree := t
 	for _, intermediateKey := range keys[:len(keys)-1] {
 		nextTree, exists := subtree.values[intermediateKey]
@@ -209,13 +213,17 @@ func (t *Tree) SetPath(keys []string, value interface{}) {
 
 	switch value.(type) {
 	case *Tree:
+		tt := value.(*Tree)
+		tt.comment = comment
 		toInsert = value
 	case []*Tree:
 		toInsert = value
 	case *tomlValue:
-		toInsert = value
+		tt := value.(*tomlValue)
+		tt.comment = comment
+		toInsert = tt
 	default:
-		toInsert = &tomlValue{value: value}
+		toInsert = &tomlValue{value: value, comment: comment, commented: commented}
 	}
 
 	subtree.values[keys[len(keys)-1]] = toInsert
diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/tomltree_create.go
index 19d1c0dc..79610e9b 100644
--- a/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/tomltree_create.go
+++ b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/tomltree_create.go
@@ -104,7 +104,7 @@ func sliceToTree(object interface{}) (interface{}, error) {
 		}
 		arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue))
 	}
-	return &tomlValue{arrayValue.Interface(), Position{}}, nil
+	return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil
 }
 
 func toTree(object interface{}) (interface{}, error) {
@@ -127,7 +127,7 @@ func toTree(object interface{}) (interface{}, error) {
 			}
 			values[key.String()] = newValue
 		}
-		return &Tree{values, Position{}}, nil
+		return &Tree{values: values, position: Position{}}, nil
 	}
 
 	if value.Kind() == reflect.Array || value.Kind() == reflect.Slice {
@@ -138,5 +138,5 @@ func toTree(object interface{}) (interface{}, error) {
 	if err != nil {
 		return nil, err
 	}
-	return &tomlValue{simpleValue, Position{}}, nil
+	return &tomlValue{value: simpleValue, position: Position{}}, nil
 }
diff --git a/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/tomltree_write.go
index ca763ed5..449f35a4 100644
--- a/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/tomltree_write.go
+++ b/vendor/github.com/mattermost/platform/vendor/github.com/pelletier/go-toml/tomltree_write.go
@@ -118,7 +118,24 @@ func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64) (
 			return bytesCount, err
 		}
 
-		writtenBytesCount, err := writeStrings(w, indent, k, " = ", repr, "\n")
+		if v.comment != "" {
+			comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1)
+			start := "# "
+			if strings.HasPrefix(comment, "#") {
+				start = ""
+			}
+			writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n")
+			bytesCount += int64(writtenBytesCountComment)
+			if errc != nil {
+				return bytesCount, errc
+			}
+		}
+
+		var commented string
+		if v.commented {
+			commented = "# "
+		}
+		writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n")
 		bytesCount += int64(writtenBytesCount)
 		if err != nil {
 			return bytesCount, err
@@ -132,11 +149,31 @@ func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64) (
 		if keyspace != "" {
 			combinedKey = keyspace + "." + combinedKey
 		}
+		var commented string
+		if t.commented {
+			commented = "# "
+		}
 
 		switch node := v.(type) {
 		// node has to be of those two types given how keys are sorted above
 		case *Tree:
-			writtenBytesCount, err := writeStrings(w, "\n", indent, "[", combinedKey, "]\n")
+			tv, ok := t.values[k].(*Tree)
+			if !ok {
+				return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
+			}
+			if tv.comment != "" {
+				comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1)
+				start := "# "
+				if strings.HasPrefix(comment, "#") {
+					start = ""
+				}
+				writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment)
+				bytesCount += int64(writtenBytesCountComment)
+				if errc != nil {
+					return bytesCount, errc
+				}
+			}
+			writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n")
 			bytesCount += int64(writtenBytesCount)
 			if err != nil {
 				return bytesCount, err
@@ -147,7 +184,7 @@ func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64) (
 			}
 		case []*Tree:
 			for _, subTree := range node {
-				writtenBytesCount, err := writeStrings(w, "\n", indent, "[[", combinedKey, "]]\n")
+				writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n")
 				bytesCount += int64(writtenBytesCount)
 				if err != nil {
 					return bytesCount, err
diff --git a/vendor/github.com/mattermost/platform/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/github.com/mattermost/platform/vendor/gopkg.in/yaml.v2/scannerc.go
index 2c9d5111..07448445 100644
--- a/vendor/github.com/mattermost/platform/vendor/gopkg.in/yaml.v2/scannerc.go
+++ b/vendor/github.com/mattermost/platform/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -611,7 +611,7 @@ func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, co
 	if directive {
 		context = "while parsing a %TAG directive"
 	}
-	return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet")
+	return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
 }
 
 func trace(args ...interface{}) func() {
@@ -1944,7 +1944,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma
 	} else {
 		// It's either the '!' tag or not really a tag handle. If it's a %TAG
 		// directive, it's an error. If it's a tag token, it must be a part of URI.
-		if directive && !(s[0] == '!' && s[1] == 0) {
+		if directive && string(s) != "!" {
 			yaml_parser_set_scanner_tag_error(parser, directive, start_mark,
 				"did not find expected '!'")
 			return false
@@ -1959,6 +1959,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma
 func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
 	//size_t length = head ? strlen((char *)head) : 0
 	var s []byte
+	hasTag := len(head) > 0
 
 	// Copy the head if needed.
 	//
@@ -2000,10 +2001,10 @@ func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte
 		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
 			return false
 		}
+		hasTag = true
 	}
 
-	// Check if the tag is non-empty.
-	if len(s) == 0 {
+	if !hasTag {
 		yaml_parser_set_scanner_tag_error(parser, directive, start_mark,
 			"did not find expected tag URI")
 		return false
diff --git a/vendor/manifest b/vendor/manifest
index 44dd7cec..0f6fc0e8 100644
--- a/vendor/manifest
+++ b/vendor/manifest
@@ -303,92 +303,119 @@
 			"notests": true
 		},
 		{
-			"importpath": "github.com/mattermost/platform/einterfaces",
-			"repository": "https://github.com/mattermost/platform",
+			"importpath": "github.com/mattermost/mattermost-server/einterfaces",
+			"repository": "https://github.com/mattermost/mattermost-server",
 			"vcs": "git",
-			"revision": "0033e3e37b12cb5d951d21492500d66a6abc472b",
-			"branch": "release-4.1",
+			"revision": "1b064c674a21fbee4000fdf84fbdc75da1fab1f0",
+			"branch": "master",
 			"path": "einterfaces",
 			"notests": true
 		},
 		{
-			"importpath": "github.com/mattermost/platform/model",
-			"repository": "https://github.com/mattermost/platform",
+			"importpath": "github.com/mattermost/mattermost-server/model",
+			"repository": "https://github.com/mattermost/mattermost-server",
 			"vcs": "git",
-			"revision": "0033e3e37b12cb5d951d21492500d66a6abc472b",
-			"branch": "release-4.1",
+			"revision": "1b064c674a21fbee4000fdf84fbdc75da1fab1f0",
+			"branch": "master",
 			"path": "/model",
 			"notests": true
 		},
 		{
-			"importpath": "github.com/mattermost/platform/vendor/github.com/alecthomas/log4go",
-			"repository": "https://github.com/mattermost/platform",
+			"importpath": "github.com/mattermost/mattermost-server/vendor/github.com/alecthomas/log4go",
+			"repository": "https://github.com/mattermost/mattermost-server",
 			"vcs": "git",
-			"revision": "8ec37570742b67fd640bb3434ea226c655dbf408",
-			"branch": "release-3.7",
+			"revision": "1b064c674a21fbee4000fdf84fbdc75da1fab1f0",
+			"branch": "master",
 			"path": "vendor/github.com/alecthomas/log4go",
 			"notests": true
 		},
 		{
-			"importpath": "github.com/mattermost/platform/vendor/github.com/gorilla/websocket",
-			"repository": "https://github.com/mattermost/platform",
+			"importpath": "github.com/mattermost/mattermost-server/vendor/github.com/go-ldap/ldap",
+			"repository": "https://github.com/mattermost/mattermost-server",
 			"vcs": "git",
-			"revision": "0033e3e37b12cb5d951d21492500d66a6abc472b",
-			"branch": "release-4.1",
-			"path": "vendor/github.com/gorilla/websocket",
+			"revision": "1b064c674a21fbee4000fdf84fbdc75da1fab1f0",
+			"branch": "master",
+			"path": "vendor/github.com/go-ldap/ldap",
 			"notests": true
 		},
 		{
-			"importpath": "github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n",
-			"repository": "https://github.com/mattermost/platform",
+			"importpath": "github.com/mattermost/mattermost-server/vendor/github.com/pborman/uuid",
+			"repository": "https://github.com/mattermost/mattermost-server",
 			"vcs": "git",
-			"revision": "0033e3e37b12cb5d951d21492500d66a6abc472b",
-			"branch": "release-4.1",
-			"path": "vendor/github.com/nicksnyder/go-i18n/i18n",
+			"revision": "1b064c674a21fbee4000fdf84fbdc75da1fab1f0",
+			"branch": "master",
+			"path": "vendor/github.com/pborman/uuid",
 			"notests": true
 		},
 		{
-			"importpath": "github.com/mattermost/platform/vendor/github.com/pborman/uuid",
-			"repository": "https://github.com/mattermost/platform",
+			"importpath": "github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/bcrypt",
+			"repository": "https://github.com/mattermost/mattermost-server",
 			"vcs": "git",
-			"revision": "0033e3e37b12cb5d951d21492500d66a6abc472b",
-			"branch": "release-4.1",
-			"path": "vendor/github.com/pborman/uuid",
+			"revision": "1b064c674a21fbee4000fdf84fbdc75da1fab1f0",
+			"branch": "master",
+			"path": "vendor/golang.org/x/crypto/bcrypt",
 			"notests": true
 		},
 		{
-			"importpath": "github.com/mattermost/platform/vendor/github.com/pelletier/go-toml",
+			"importpath": "github.com/mattermost/mattermost-server/vendor/golang.org/x/crypto/blowfish",
+			"repository": "https://github.com/mattermost/mattermost-server",
+			"vcs": "git",
+			"revision": "1b064c674a21fbee4000fdf84fbdc75da1fab1f0",
+			"branch": "master",
+			"path": "vendor/golang.org/x/crypto/blowfish",
+			"notests": true
+		},
+		{
+			"importpath": "github.com/mattermost/mattermost-server/vendor/gopkg.in/asn1-ber.v1",
+			"repository": "https://github.com/mattermost/mattermost-server",
+			"vcs": "git",
+			"revision": "1b064c674a21fbee4000fdf84fbdc75da1fab1f0",
+			"branch": "master",
+			"path": "vendor/gopkg.in/asn1-ber.v1",
+			"notests": true
+		},
+		{
+			"importpath": "github.com/mattermost/mattermost-server/vendor/gopkg.in/yaml.v2",
+			"repository": "https://github.com/mattermost/mattermost-server",
+			"vcs": "git",
+			"revision": "1b064c674a21fbee4000fdf84fbdc75da1fab1f0",
+			"branch": "master",
+			"path": "vendor/gopkg.in/yaml.v2",
+			"notests": true
+		},
+		{
+			"importpath": "github.com/mattermost/platform/model",
 			"repository": "https://github.com/mattermost/platform",
 			"vcs": "git",
-			"revision": "0033e3e37b12cb5d951d21492500d66a6abc472b",
-			"branch": "release-4.1",
-			"path": "vendor/github.com/pelletier/go-toml",
+			"revision": "cc82749d4f8c47bce201123aedcd8c564ceffcb8",
+			"branch": "release-4.6",
+			"path": "/model",
 			"notests": true
 		},
 		{
-			"importpath": "github.com/mattermost/platform/vendor/golang.org/x/crypto/bcrypt",
+			"importpath": "github.com/mattermost/platform/vendor/github.com/nicksnyder/go-i18n/i18n",
 			"repository": "https://github.com/mattermost/platform",
 			"vcs": "git",
-			"revision": "0033e3e37b12cb5d951d21492500d66a6abc472b",
-			"branch": "release-4.1",
-			"path": "vendor/golang.org/x/crypto/bcrypt",
+			"revision": "cc82749d4f8c47bce201123aedcd8c564ceffcb8",
+			"branch": "release-4.6",
+			"path": "vendor/github.com/nicksnyder/go-i18n/i18n",
 			"notests": true
 		},
 		{
-			"importpath": "github.com/mattermost/platform/vendor/golang.org/x/crypto/blowfish",
+			"importpath": "github.com/mattermost/platform/vendor/github.com/pelletier/go-toml",
 			"repository": "https://github.com/mattermost/platform",
 			"vcs": "git",
-			"revision": "0033e3e37b12cb5d951d21492500d66a6abc472b",
-			"branch": "release-4.1",
-			"path": "vendor/golang.org/x/crypto/blowfish",
+			"revision": "cc82749d4f8c47bce201123aedcd8c564ceffcb8",
+			"branch": "release-4.6",
+			"path": "vendor/github.com/pelletier/go-toml",
 			"notests": true
 		},
 		{
 			"importpath": "github.com/mattermost/platform/vendor/gopkg.in/yaml.v2",
 			"repository": "https://github.com/mattermost/platform",
 			"vcs": "git",
-			"revision": "0033e3e37b12cb5d951d21492500d66a6abc472b",
-			"branch": "release-4.1",
+			"revision": "cc82749d4f8c47bce201123aedcd8c564ceffcb8",
+			"branch": "release-4.6",
 			"path": "vendor/gopkg.in/yaml.v2",
 			"notests": true
 		},