Diffstat (limited to 'vendor/gopkg.in')
-rw-r--r--  vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore     |  23
-rw-r--r--  vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml    |   6
-rw-r--r--  vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE        |  21
-rw-r--r--  vendor/gopkg.in/natefinch/lumberjack.v2/README.md      | 179
-rw-r--r--  vendor/gopkg.in/natefinch/lumberjack.v2/chown.go       |  11
-rw-r--r--  vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go |  19
-rw-r--r--  vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go  | 541
-rw-r--r--  vendor/gopkg.in/yaml.v2/.travis.yml                    |   3
-rw-r--r--  vendor/gopkg.in/yaml.v2/LICENSE                        | 389
-rw-r--r--  vendor/gopkg.in/yaml.v2/NOTICE                         |  13
-rw-r--r--  vendor/gopkg.in/yaml.v2/README.md                      |   4
-rw-r--r--  vendor/gopkg.in/yaml.v2/apic.go                        |  55
-rw-r--r--  vendor/gopkg.in/yaml.v2/decode.go                      | 246
-rw-r--r--  vendor/gopkg.in/yaml.v2/emitterc.go                    |  20
-rw-r--r--  vendor/gopkg.in/yaml.v2/encode.go                      | 136
-rw-r--r--  vendor/gopkg.in/yaml.v2/go.mod                         |   5
-rw-r--r--  vendor/gopkg.in/yaml.v2/parserc.go                     |   1
-rw-r--r--  vendor/gopkg.in/yaml.v2/readerc.go                     |  20
-rw-r--r--  vendor/gopkg.in/yaml.v2/resolve.go                     |  91
-rw-r--r--  vendor/gopkg.in/yaml.v2/scannerc.go                    |  40
-rw-r--r--  vendor/gopkg.in/yaml.v2/sorter.go                      |   9
-rw-r--r--  vendor/gopkg.in/yaml.v2/writerc.go                     |  65
-rw-r--r--  vendor/gopkg.in/yaml.v2/yaml.go                        | 136
-rw-r--r--  vendor/gopkg.in/yaml.v2/yamlh.go                       |  32
24 files changed, 1596 insertions(+), 469 deletions(-)
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore b/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore
new file mode 100644
index 00000000..83656241
--- /dev/null
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml b/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml
new file mode 100644
index 00000000..65dcbc56
--- /dev/null
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml
@@ -0,0 +1,6 @@
+language: go
+
+go:
+ - 1.8
+ - 1.7
+ - 1.6
\ No newline at end of file
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE b/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE
new file mode 100644
index 00000000..c3d4cc30
--- /dev/null
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Nate Finch
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/README.md b/vendor/gopkg.in/natefinch/lumberjack.v2/README.md
new file mode 100644
index 00000000..060eae52
--- /dev/null
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/README.md
@@ -0,0 +1,179 @@
+# lumberjack [![GoDoc](https://godoc.org/gopkg.in/natefinch/lumberjack.v2?status.png)](https://godoc.org/gopkg.in/natefinch/lumberjack.v2) [![Build Status](https://travis-ci.org/natefinch/lumberjack.svg?branch=v2.0)](https://travis-ci.org/natefinch/lumberjack) [![Build status](https://ci.appveyor.com/api/projects/status/00gchpxtg4gkrt5d)](https://ci.appveyor.com/project/natefinch/lumberjack) [![Coverage Status](https://coveralls.io/repos/natefinch/lumberjack/badge.svg?branch=v2.0)](https://coveralls.io/r/natefinch/lumberjack?branch=v2.0)
+
+### Lumberjack is a Go package for writing logs to rolling files.
+
+Package lumberjack provides a rolling logger.
+
+Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
+thusly:
+
+ import "gopkg.in/natefinch/lumberjack.v2"
+
+The package name remains simply lumberjack, and the code resides at
+https://github.com/natefinch/lumberjack under the v2.0 branch.
+
+Lumberjack is intended to be one part of a logging infrastructure.
+It is not an all-in-one solution, but instead is a pluggable
+component at the bottom of the logging stack that simply controls the files
+to which logs are written.
+
+Lumberjack plays well with any logging package that can write to an
+io.Writer, including the standard library's log package.
+
+Lumberjack assumes that only one process is writing to the output files.
+Using the same lumberjack configuration from multiple processes on the same
+machine will result in improper behavior.
+
+
+**Example**
+
+To use lumberjack with the standard library's log package, just pass it into the SetOutput function when your application starts.
+
+Code:
+
+```go
+log.SetOutput(&lumberjack.Logger{
+ Filename: "/var/log/myapp/foo.log",
+ MaxSize: 500, // megabytes
+ MaxBackups: 3,
+ MaxAge: 28, // days
+ Compress: true, // disabled by default
+})
+```
+
+
+
+## type Logger
+``` go
+type Logger struct {
+ // Filename is the file to write logs to. Backup log files will be retained
+ // in the same directory. It uses <processname>-lumberjack.log in
+ // os.TempDir() if empty.
+ Filename string `json:"filename" yaml:"filename"`
+
+ // MaxSize is the maximum size in megabytes of the log file before it gets
+ // rotated. It defaults to 100 megabytes.
+ MaxSize int `json:"maxsize" yaml:"maxsize"`
+
+ // MaxAge is the maximum number of days to retain old log files based on the
+ // timestamp encoded in their filename. Note that a day is defined as 24
+ // hours and may not exactly correspond to calendar days due to daylight
+ // savings, leap seconds, etc. The default is not to remove old log files
+ // based on age.
+ MaxAge int `json:"maxage" yaml:"maxage"`
+
+ // MaxBackups is the maximum number of old log files to retain. The default
+ // is to retain all old log files (though MaxAge may still cause them to get
+ // deleted.)
+ MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
+
+ // LocalTime determines if the time used for formatting the timestamps in
+ // backup files is the computer's local time. The default is to use UTC
+ // time.
+ LocalTime bool `json:"localtime" yaml:"localtime"`
+
+ // Compress determines if the rotated log files should be compressed
+ // using gzip. The default is not to perform compression.
+ Compress bool `json:"compress" yaml:"compress"`
+ // contains filtered or unexported fields
+}
+```
+Logger is an io.WriteCloser that writes to the specified filename.
+
+Logger opens or creates the logfile on first Write. If the file exists and
+is less than MaxSize megabytes, lumberjack will open and append to that file.
+If the file exists and its size is >= MaxSize megabytes, the file is renamed
+by putting the current time in a timestamp in the name immediately before the
+file's extension (or the end of the filename if there's no extension). A new
+log file is then created using the original filename.
+
+Whenever a write would cause the current log file to exceed MaxSize megabytes,
+the current file is closed, renamed, and a new log file created with the
+original name. Thus, the filename you give Logger is always the "current" log
+file.
+
+Backups use the log file name given to Logger, in the form `name-timestamp.ext`
+where name is the filename without the extension, timestamp is the time at which
+the log was rotated formatted with the time.Time format of
+`2006-01-02T15-04-05.000` and the extension is the original extension. For
+example, if your Logger.Filename is `/var/log/foo/server.log`, a backup created
+at 6:30pm on Nov 4 2016 would use the filename
+`/var/log/foo/server-2016-11-04T18-30-00.000.log`
+
+### Cleaning Up Old Log Files
+Whenever a new logfile gets created, old log files may be deleted. The most
+recent files according to the encoded timestamp will be retained, up to a
+number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
+with an encoded timestamp older than MaxAge days are deleted, regardless of
+MaxBackups. Note that the time encoded in the timestamp is the rotation
+time, which may differ from the last time that file was written to.
+
+If MaxBackups and MaxAge are both 0, no old log files will be deleted.
+
+
+
+
+
+
+
+
+
+
+
+### func (\*Logger) Close
+``` go
+func (l *Logger) Close() error
+```
+Close implements io.Closer, and closes the current logfile.
+
+
+
+### func (\*Logger) Rotate
+``` go
+func (l *Logger) Rotate() error
+```
+Rotate causes Logger to close the existing log file and immediately create a
+new one. This is a helper function for applications that want to initiate
+rotations outside of the normal rotation rules, such as in response to
+SIGHUP. After rotating, this initiates a cleanup of old log files according
+to the normal rules.
+
+**Example**
+
+Example of how to rotate in response to SIGHUP.
+
+Code:
+
+```go
+l := &lumberjack.Logger{}
+log.SetOutput(l)
+c := make(chan os.Signal, 1)
+signal.Notify(c, syscall.SIGHUP)
+
+go func() {
+	for {
+		<-c
+		l.Rotate()
+	}
+}()
+```
+
+### func (\*Logger) Write
+``` go
+func (l *Logger) Write(p []byte) (n int, err error)
+```
+Write implements io.Writer. If a write would cause the log file to be larger
+than MaxSize, the file is closed, renamed to include a timestamp of the
+current time, and a new log file is created using the original log file name.
+If the length of the write is greater than MaxSize, an error is returned.
+
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
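The backup-naming rule described in the README above can be checked by hand. Below is a minimal sketch of the derivation; it mirrors the `backupName` helper that appears in lumberjack.go later in this diff, but the standalone function here is a re-derivation for illustration, not the package's API:

```go
package main

import (
	"fmt"
	"path/filepath"
	"time"
)

// backupName re-derives lumberjack's backup filename: the rotation
// timestamp is inserted between the base name and its extension.
func backupName(name string, t time.Time) string {
	dir := filepath.Dir(name)
	base := filepath.Base(name)
	ext := filepath.Ext(base)
	prefix := base[:len(base)-len(ext)]
	timestamp := t.UTC().Format("2006-01-02T15-04-05.000")
	return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
}

func main() {
	t := time.Date(2016, 11, 4, 18, 30, 0, 0, time.UTC)
	fmt.Println(backupName("/var/log/foo/server.log", t))
	// Prints: /var/log/foo/server-2016-11-04T18-30-00.000.log
}
```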
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go b/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go
new file mode 100644
index 00000000..11d06697
--- /dev/null
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go
@@ -0,0 +1,11 @@
+// +build !linux
+
+package lumberjack
+
+import (
+ "os"
+)
+
+func chown(_ string, _ os.FileInfo) error {
+ return nil
+}
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go b/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go
new file mode 100644
index 00000000..2758ec9c
--- /dev/null
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go
@@ -0,0 +1,19 @@
+package lumberjack
+
+import (
+ "os"
+ "syscall"
+)
+
+// os_Chown is a var so we can mock it out during tests.
+var os_Chown = os.Chown
+
+func chown(name string, info os.FileInfo) error {
+ f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode())
+ if err != nil {
+ return err
+ }
+ f.Close()
+ stat := info.Sys().(*syscall.Stat_t)
+ return os_Chown(name, int(stat.Uid), int(stat.Gid))
+}
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go
new file mode 100644
index 00000000..46d97c55
--- /dev/null
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go
@@ -0,0 +1,541 @@
+// Package lumberjack provides a rolling logger.
+//
+// Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
+// thusly:
+//
+// import "gopkg.in/natefinch/lumberjack.v2"
+//
+// The package name remains simply lumberjack, and the code resides at
+// https://github.com/natefinch/lumberjack under the v2.0 branch.
+//
+// Lumberjack is intended to be one part of a logging infrastructure.
+// It is not an all-in-one solution, but instead is a pluggable
+// component at the bottom of the logging stack that simply controls the files
+// to which logs are written.
+//
+// Lumberjack plays well with any logging package that can write to an
+// io.Writer, including the standard library's log package.
+//
+// Lumberjack assumes that only one process is writing to the output files.
+// Using the same lumberjack configuration from multiple processes on the same
+// machine will result in improper behavior.
+package lumberjack
+
+import (
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ backupTimeFormat = "2006-01-02T15-04-05.000"
+ compressSuffix = ".gz"
+ defaultMaxSize = 100
+)
+
+// ensure we always implement io.WriteCloser
+var _ io.WriteCloser = (*Logger)(nil)
+
+// Logger is an io.WriteCloser that writes to the specified filename.
+//
+// Logger opens or creates the logfile on first Write. If the file exists and
+// is less than MaxSize megabytes, lumberjack will open and append to that file.
+// If the file exists and its size is >= MaxSize megabytes, the file is renamed
+// by putting the current time in a timestamp in the name immediately before the
+// file's extension (or the end of the filename if there's no extension). A new
+// log file is then created using the original filename.
+//
+// Whenever a write would cause the current log file to exceed MaxSize megabytes,
+// the current file is closed, renamed, and a new log file created with the
+// original name. Thus, the filename you give Logger is always the "current" log
+// file.
+//
+// Backups use the log file name given to Logger, in the form
+// `name-timestamp.ext` where name is the filename without the extension,
+// timestamp is the time at which the log was rotated formatted with the
+// time.Time format of `2006-01-02T15-04-05.000` and the extension is the
+// original extension. For example, if your Logger.Filename is
+// `/var/log/foo/server.log`, a backup created at 6:30pm on Nov 4 2016 would
+// use the filename `/var/log/foo/server-2016-11-04T18-30-00.000.log`
+//
+// Cleaning Up Old Log Files
+//
+// Whenever a new logfile gets created, old log files may be deleted. The most
+// recent files according to the encoded timestamp will be retained, up to a
+// number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
+// with an encoded timestamp older than MaxAge days are deleted, regardless of
+// MaxBackups. Note that the time encoded in the timestamp is the rotation
+// time, which may differ from the last time that file was written to.
+//
+// If MaxBackups and MaxAge are both 0, no old log files will be deleted.
+type Logger struct {
+ // Filename is the file to write logs to. Backup log files will be retained
+ // in the same directory. It uses <processname>-lumberjack.log in
+ // os.TempDir() if empty.
+ Filename string `json:"filename" yaml:"filename"`
+
+ // MaxSize is the maximum size in megabytes of the log file before it gets
+ // rotated. It defaults to 100 megabytes.
+ MaxSize int `json:"maxsize" yaml:"maxsize"`
+
+ // MaxAge is the maximum number of days to retain old log files based on the
+ // timestamp encoded in their filename. Note that a day is defined as 24
+ // hours and may not exactly correspond to calendar days due to daylight
+ // savings, leap seconds, etc. The default is not to remove old log files
+ // based on age.
+ MaxAge int `json:"maxage" yaml:"maxage"`
+
+ // MaxBackups is the maximum number of old log files to retain. The default
+ // is to retain all old log files (though MaxAge may still cause them to get
+ // deleted.)
+ MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
+
+ // LocalTime determines if the time used for formatting the timestamps in
+ // backup files is the computer's local time. The default is to use UTC
+ // time.
+ LocalTime bool `json:"localtime" yaml:"localtime"`
+
+ // Compress determines if the rotated log files should be compressed
+ // using gzip. The default is not to perform compression.
+ Compress bool `json:"compress" yaml:"compress"`
+
+ size int64
+ file *os.File
+ mu sync.Mutex
+
+ millCh chan bool
+ startMill sync.Once
+}
+
+var (
+ // currentTime exists so it can be mocked out by tests.
+ currentTime = time.Now
+
+ // os_Stat exists so it can be mocked out by tests.
+ os_Stat = os.Stat
+
+ // megabyte is the conversion factor between MaxSize and bytes. It is a
+ // variable so tests can mock it out and not need to write megabytes of data
+ // to disk.
+ megabyte = 1024 * 1024
+)
+
+// Write implements io.Writer. If a write would cause the log file to be larger
+// than MaxSize, the file is closed, renamed to include a timestamp of the
+// current time, and a new log file is created using the original log file name.
+// If the length of the write is greater than MaxSize, an error is returned.
+func (l *Logger) Write(p []byte) (n int, err error) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ writeLen := int64(len(p))
+ if writeLen > l.max() {
+ return 0, fmt.Errorf(
+ "write length %d exceeds maximum file size %d", writeLen, l.max(),
+ )
+ }
+
+ if l.file == nil {
+ if err = l.openExistingOrNew(len(p)); err != nil {
+ return 0, err
+ }
+ }
+
+ if l.size+writeLen > l.max() {
+ if err := l.rotate(); err != nil {
+ return 0, err
+ }
+ }
+
+ n, err = l.file.Write(p)
+ l.size += int64(n)
+
+ return n, err
+}
+
+// Close implements io.Closer, and closes the current logfile.
+func (l *Logger) Close() error {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ return l.close()
+}
+
+// close closes the file if it is open.
+func (l *Logger) close() error {
+ if l.file == nil {
+ return nil
+ }
+ err := l.file.Close()
+ l.file = nil
+ return err
+}
+
+// Rotate causes Logger to close the existing log file and immediately create a
+// new one. This is a helper function for applications that want to initiate
+// rotations outside of the normal rotation rules, such as in response to
+// SIGHUP. After rotating, this initiates compression and removal of old log
+// files according to the configuration.
+func (l *Logger) Rotate() error {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ return l.rotate()
+}
+
+// rotate closes the current file, moves it aside with a timestamp in the name,
+// (if it exists), opens a new file with the original filename, and then runs
+// post-rotation processing and removal.
+func (l *Logger) rotate() error {
+ if err := l.close(); err != nil {
+ return err
+ }
+ if err := l.openNew(); err != nil {
+ return err
+ }
+ l.mill()
+ return nil
+}
+
+// openNew opens a new log file for writing, moving any old log file out of the
+// way. This method assumes the file has already been closed.
+func (l *Logger) openNew() error {
+ err := os.MkdirAll(l.dir(), 0744)
+ if err != nil {
+ return fmt.Errorf("can't make directories for new logfile: %s", err)
+ }
+
+ name := l.filename()
+ mode := os.FileMode(0644)
+ info, err := os_Stat(name)
+ if err == nil {
+ // Copy the mode off the old logfile.
+ mode = info.Mode()
+ // move the existing file
+ newname := backupName(name, l.LocalTime)
+ if err := os.Rename(name, newname); err != nil {
+ return fmt.Errorf("can't rename log file: %s", err)
+ }
+
+ // this is a no-op anywhere but linux
+ if err := chown(name, info); err != nil {
+ return err
+ }
+ }
+
+ // we use truncate here because this should only get called when we've moved
+ // the file ourselves. if someone else creates the file in the meantime,
+ // just wipe out the contents.
+ f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
+ if err != nil {
+ return fmt.Errorf("can't open new logfile: %s", err)
+ }
+ l.file = f
+ l.size = 0
+ return nil
+}
+
+// backupName creates a new filename from the given name, inserting a timestamp
+// between the filename and the extension, using the local time if requested
+// (otherwise UTC).
+func backupName(name string, local bool) string {
+ dir := filepath.Dir(name)
+ filename := filepath.Base(name)
+ ext := filepath.Ext(filename)
+ prefix := filename[:len(filename)-len(ext)]
+ t := currentTime()
+ if !local {
+ t = t.UTC()
+ }
+
+ timestamp := t.Format(backupTimeFormat)
+ return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
+}
+
+// openExistingOrNew opens the logfile if it exists and if the current write
+// would not put it over MaxSize. If there is no such file or the write would
+// put it over the MaxSize, a new file is created.
+func (l *Logger) openExistingOrNew(writeLen int) error {
+ l.mill()
+
+ filename := l.filename()
+ info, err := os_Stat(filename)
+ if os.IsNotExist(err) {
+ return l.openNew()
+ }
+ if err != nil {
+ return fmt.Errorf("error getting log file info: %s", err)
+ }
+
+ if info.Size()+int64(writeLen) >= l.max() {
+ return l.rotate()
+ }
+
+ file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644)
+ if err != nil {
+ // if we fail to open the old log file for some reason, just ignore
+ // it and open a new log file.
+ return l.openNew()
+ }
+ l.file = file
+ l.size = info.Size()
+ return nil
+}
+
+// genFilename generates the name of the logfile from the current time.
+func (l *Logger) filename() string {
+ if l.Filename != "" {
+ return l.Filename
+ }
+ name := filepath.Base(os.Args[0]) + "-lumberjack.log"
+ return filepath.Join(os.TempDir(), name)
+}
+
+// millRunOnce performs compression and removal of stale log files.
+// Log files are compressed if enabled via configuration and old log
+// files are removed, keeping at most l.MaxBackups files, as long as
+// none of them are older than MaxAge.
+func (l *Logger) millRunOnce() error {
+ if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress {
+ return nil
+ }
+
+ files, err := l.oldLogFiles()
+ if err != nil {
+ return err
+ }
+
+ var compress, remove []logInfo
+
+ if l.MaxBackups > 0 && l.MaxBackups < len(files) {
+ preserved := make(map[string]bool)
+ var remaining []logInfo
+ for _, f := range files {
+ // Only count the uncompressed log file or the
+ // compressed log file, not both.
+ fn := f.Name()
+ if strings.HasSuffix(fn, compressSuffix) {
+ fn = fn[:len(fn)-len(compressSuffix)]
+ }
+ preserved[fn] = true
+
+ if len(preserved) > l.MaxBackups {
+ remove = append(remove, f)
+ } else {
+ remaining = append(remaining, f)
+ }
+ }
+ files = remaining
+ }
+ if l.MaxAge > 0 {
+ diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge))
+ cutoff := currentTime().Add(-1 * diff)
+
+ var remaining []logInfo
+ for _, f := range files {
+ if f.timestamp.Before(cutoff) {
+ remove = append(remove, f)
+ } else {
+ remaining = append(remaining, f)
+ }
+ }
+ files = remaining
+ }
+
+ if l.Compress {
+ for _, f := range files {
+ if !strings.HasSuffix(f.Name(), compressSuffix) {
+ compress = append(compress, f)
+ }
+ }
+ }
+
+ for _, f := range remove {
+ errRemove := os.Remove(filepath.Join(l.dir(), f.Name()))
+ if err == nil && errRemove != nil {
+ err = errRemove
+ }
+ }
+ for _, f := range compress {
+ fn := filepath.Join(l.dir(), f.Name())
+ errCompress := compressLogFile(fn, fn+compressSuffix)
+ if err == nil && errCompress != nil {
+ err = errCompress
+ }
+ }
+
+ return err
+}
+
+// millRun runs in a goroutine to manage post-rotation compression and removal
+// of old log files.
+func (l *Logger) millRun() {
+ for _ = range l.millCh {
+ // what am I going to do, log this?
+ _ = l.millRunOnce()
+ }
+}
+
+// mill performs post-rotation compression and removal of stale log files,
+// starting the mill goroutine if necessary.
+func (l *Logger) mill() {
+ l.startMill.Do(func() {
+ l.millCh = make(chan bool, 1)
+ go l.millRun()
+ })
+ select {
+ case l.millCh <- true:
+ default:
+ }
+}
+
+// oldLogFiles returns the list of backup log files stored in the same
+// directory as the current log file, sorted by the timestamp encoded in
+// their names.
+func (l *Logger) oldLogFiles() ([]logInfo, error) {
+ files, err := ioutil.ReadDir(l.dir())
+ if err != nil {
+ return nil, fmt.Errorf("can't read log file directory: %s", err)
+ }
+ logFiles := []logInfo{}
+
+ prefix, ext := l.prefixAndExt()
+
+ for _, f := range files {
+ if f.IsDir() {
+ continue
+ }
+ if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil {
+ logFiles = append(logFiles, logInfo{t, f})
+ continue
+ }
+ if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil {
+ logFiles = append(logFiles, logInfo{t, f})
+ continue
+ }
+ // error parsing means that the suffix at the end was not generated
+ // by lumberjack, and therefore it's not a backup file.
+ }
+
+ sort.Sort(byFormatTime(logFiles))
+
+ return logFiles, nil
+}
+
+// timeFromName extracts the formatted time from the filename by stripping off
+// the filename's prefix and extension. This prevents someone's filename from
+// confusing time.Parse.
+func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) {
+ if !strings.HasPrefix(filename, prefix) {
+ return time.Time{}, errors.New("mismatched prefix")
+ }
+ if !strings.HasSuffix(filename, ext) {
+ return time.Time{}, errors.New("mismatched extension")
+ }
+ ts := filename[len(prefix) : len(filename)-len(ext)]
+ return time.Parse(backupTimeFormat, ts)
+}
+
+// max returns the maximum size in bytes of log files before rolling.
+func (l *Logger) max() int64 {
+ if l.MaxSize == 0 {
+ return int64(defaultMaxSize * megabyte)
+ }
+ return int64(l.MaxSize) * int64(megabyte)
+}
+
+// dir returns the directory for the current filename.
+func (l *Logger) dir() string {
+ return filepath.Dir(l.filename())
+}
+
+// prefixAndExt returns the filename part and extension part from the Logger's
+// filename.
+func (l *Logger) prefixAndExt() (prefix, ext string) {
+ filename := filepath.Base(l.filename())
+ ext = filepath.Ext(filename)
+ prefix = filename[:len(filename)-len(ext)] + "-"
+ return prefix, ext
+}
+
+// compressLogFile compresses the given log file, removing the
+// uncompressed log file if successful.
+func compressLogFile(src, dst string) (err error) {
+ f, err := os.Open(src)
+ if err != nil {
+ return fmt.Errorf("failed to open log file: %v", err)
+ }
+ defer f.Close()
+
+ fi, err := os_Stat(src)
+ if err != nil {
+ return fmt.Errorf("failed to stat log file: %v", err)
+ }
+
+ if err := chown(dst, fi); err != nil {
+ return fmt.Errorf("failed to chown compressed log file: %v", err)
+ }
+
+ // If this file already exists, we presume it was created by
+ // a previous attempt to compress the log file.
+ gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode())
+ if err != nil {
+ return fmt.Errorf("failed to open compressed log file: %v", err)
+ }
+ defer gzf.Close()
+
+ gz := gzip.NewWriter(gzf)
+
+ defer func() {
+ if err != nil {
+ os.Remove(dst)
+ err = fmt.Errorf("failed to compress log file: %v", err)
+ }
+ }()
+
+ if _, err := io.Copy(gz, f); err != nil {
+ return err
+ }
+ if err := gz.Close(); err != nil {
+ return err
+ }
+ if err := gzf.Close(); err != nil {
+ return err
+ }
+
+ if err := f.Close(); err != nil {
+ return err
+ }
+ if err := os.Remove(src); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// logInfo is a convenience struct to return the filename and its embedded
+// timestamp.
+type logInfo struct {
+ timestamp time.Time
+ os.FileInfo
+}
+
+// byFormatTime sorts by newest time formatted in the name.
+type byFormatTime []logInfo
+
+func (b byFormatTime) Less(i, j int) bool {
+ return b[i].timestamp.After(b[j].timestamp)
+}
+
+func (b byFormatTime) Swap(i, j int) {
+ b[i], b[j] = b[j], b[i]
+}
+
+func (b byFormatTime) Len() int {
+ return len(b)
+}
diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml
index 004172a2..9f556934 100644
--- a/vendor/gopkg.in/yaml.v2/.travis.yml
+++ b/vendor/gopkg.in/yaml.v2/.travis.yml
@@ -4,6 +4,9 @@ go:
- 1.4
- 1.5
- 1.6
+ - 1.7
+ - 1.8
+ - 1.9
- tip
go_import_path: gopkg.in/yaml.v2
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE
index a68e67f0..8dada3ed 100644
--- a/vendor/gopkg.in/yaml.v2/LICENSE
+++ b/vendor/gopkg.in/yaml.v2/LICENSE
@@ -1,188 +1,201 @@
-
-Copyright (c) 2011-2014 - Canonical Inc.
-
-This software is licensed under the LGPLv3, included below.
-
-As a special exception to the GNU Lesser General Public License version 3
-("LGPL3"), the copyright holders of this Library give you permission to
-convey to a third party a Combined Work that links statically or dynamically
-to this Library without providing any Minimal Corresponding Source or
-Minimal Application Code as set out in 4d or providing the installation
-information set out in section 4e, provided that you comply with the other
-provisions of LGPL3 and provided that you meet, for the Application the
-terms and conditions of the license(s) which apply to the Application.
-
-Except as stated in this special exception, the provisions of LGPL3 will
-continue to comply in full to this Library. If you modify this Library, you
-may apply this exception to your version of this Library, but you are not
-obliged to do so. If you do not wish to do so, delete this exception
-statement from your version. This exception does not (and cannot) modify any
-license terms which apply to the Application, with which you must still
-comply.
-
-
- GNU LESSER GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
- This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
- 0. Additional Definitions.
-
- As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
- "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
-
- An "Application" is any work that makes use of an interface provided
-by the Library, but which is not otherwise based on the Library.
-Defining a subclass of a class defined by the Library is deemed a mode
-of using an interface provided by the Library.
-
- A "Combined Work" is a work produced by combining or linking an
-Application with the Library. The particular version of the Library
-with which the Combined Work was made is also called the "Linked
-Version".
-
- The "Minimal Corresponding Source" for a Combined Work means the
-Corresponding Source for the Combined Work, excluding any source code
-for portions of the Combined Work that, considered in isolation, are
-based on the Application, and not on the Linked Version.
-
- The "Corresponding Application Code" for a Combined Work means the
-object code and/or source code for the Application, including any data
-and utility programs needed for reproducing the Combined Work from the
-Application, but excluding the System Libraries of the Combined Work.
-
- 1. Exception to Section 3 of the GNU GPL.
-
- You may convey a covered work under sections 3 and 4 of this License
-without being bound by section 3 of the GNU GPL.
-
- 2. Conveying Modified Versions.
-
- If you modify a copy of the Library, and, in your modifications, a
-facility refers to a function or data to be supplied by an Application
-that uses the facility (other than as an argument passed when the
-facility is invoked), then you may convey a copy of the modified
-version:
-
- a) under this License, provided that you make a good faith effort to
- ensure that, in the event an Application does not supply the
- function or data, the facility still operates, and performs
- whatever part of its purpose remains meaningful, or
-
- b) under the GNU GPL, with none of the additional permissions of
- this License applicable to that copy.
-
- 3. Object Code Incorporating Material from Library Header Files.
-
- The object code form of an Application may incorporate material from
-a header file that is part of the Library. You may convey such object
-code under terms of your choice, provided that, if the incorporated
-material is not limited to numerical parameters, data structure
-layouts and accessors, or small macros, inline functions and templates
-(ten or fewer lines in length), you do both of the following:
-
- a) Give prominent notice with each copy of the object code that the
- Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the object code with a copy of the GNU GPL and this license
- document.
-
- 4. Combined Works.
-
- You may convey a Combined Work under terms of your choice that,
-taken together, effectively do not restrict modification of the
-portions of the Library contained in the Combined Work and reverse
-engineering for debugging such modifications, if you also do each of
-the following:
-
- a) Give prominent notice with each copy of the Combined Work that
- the Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the Combined Work with a copy of the GNU GPL and this license
- document.
-
- c) For a Combined Work that displays copyright notices during
- execution, include the copyright notice for the Library among
- these notices, as well as a reference directing the user to the
- copies of the GNU GPL and this license document.
-
- d) Do one of the following:
-
- 0) Convey the Minimal Corresponding Source under the terms of this
- License, and the Corresponding Application Code in a form
- suitable for, and under terms that permit, the user to
- recombine or relink the Application with a modified version of
- the Linked Version to produce a modified Combined Work, in the
- manner specified by section 6 of the GNU GPL for conveying
- Corresponding Source.
-
- 1) Use a suitable shared library mechanism for linking with the
- Library. A suitable mechanism is one that (a) uses at run time
- a copy of the Library already present on the user's computer
- system, and (b) will operate properly with a modified version
- of the Library that is interface-compatible with the Linked
- Version.
-
- e) Provide Installation Information, but only if you would otherwise
- be required to provide such information under section 6 of the
- GNU GPL, and only to the extent that such information is
- necessary to install and execute a modified version of the
- Combined Work produced by recombining or relinking the
- Application with a modified version of the Linked Version. (If
- you use option 4d0, the Installation Information must accompany
- the Minimal Corresponding Source and Corresponding Application
- Code. If you use option 4d1, you must provide the Installation
- Information in the manner specified by section 6 of the GNU GPL
- for conveying Corresponding Source.)
-
- 5. Combined Libraries.
-
- You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
- a) Accompany the combined library with a copy of the same work based
- on the Library, uncombined with any other library facilities,
- conveyed under the terms of this License.
-
- b) Give prominent notice with the combined library that part of it
- is a work based on the Library, and explaining where to find the
- accompanying uncombined form of the same work.
-
- 6. Revised Versions of the GNU Lesser General Public License.
-
- The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
- If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE
new file mode 100644
index 00000000..866d74a7
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md
index 7b8bd867..b50c6e87 100644
--- a/vendor/gopkg.in/yaml.v2/README.md
+++ b/vendor/gopkg.in/yaml.v2/README.md
@@ -42,7 +42,7 @@ The package API for yaml v2 will remain stable as described in [gopkg.in](https:
License
-------
-The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
+The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
Example
@@ -65,6 +65,8 @@ b:
d: [3, 4]
`
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
type T struct {
A string
B struct {
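The comment added in this hunk (struct fields must be public) is easy to demonstrate. Below is a runnable completion of the README example whose opening lines appear in the context above; the inner fields follow the upstream yaml.v2 README:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

var data = `
a: Easy!
b:
  c: 2
  d: [3, 4]
`

// Fields must be exported for Unmarshal to populate them; tags map
// the lowercase YAML keys onto the Go names.
type T struct {
	A string
	B struct {
		RenamedC int   `yaml:"c"`
		D        []int `yaml:",flow"`
	}
}

func main() {
	var t T
	if err := yaml.Unmarshal([]byte(data), &t); err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- t:\n%v\n", t)
}
```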
diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go
index 95ec014e..1f7e87e6 100644
--- a/vendor/gopkg.in/yaml.v2/apic.go
+++ b/vendor/gopkg.in/yaml.v2/apic.go
@@ -2,7 +2,6 @@ package yaml
import (
"io"
- "os"
)
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
@@ -48,9 +47,9 @@ func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err
return n, nil
}
-// File read handler.
-func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
- return parser.input_file.Read(buffer)
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_reader.Read(buffer)
}
// Set a string input.
@@ -64,12 +63,12 @@ func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
}
// Set a file input.
-func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
- parser.read_handler = yaml_file_read_handler
- parser.input_file = file
+ parser.read_handler = yaml_reader_read_handler
+ parser.input_reader = r
}
// Set the source encoding.
@@ -81,14 +80,13 @@ func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
}
// Create a new emitter object.
-func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{
buffer: make([]byte, output_buffer_size),
raw_buffer: make([]byte, 0, output_raw_buffer_size),
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
events: make([]yaml_event_t, 0, initial_queue_size),
}
- return true
}
// Destroy an emitter object.
@@ -102,9 +100,10 @@ func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
return nil
}
-// File write handler.
-func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
- _, err := emitter.output_file.Write(buffer)
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_writer.Write(buffer)
return err
}
@@ -118,12 +117,12 @@ func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]by
}
// Set a file output.
-func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
- emitter.write_handler = yaml_file_write_handler
- emitter.output_file = file
+ emitter.write_handler = yaml_writer_write_handler
+ emitter.output_writer = w
}
// Set the output encoding.
@@ -252,41 +251,41 @@ func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
//
// Create STREAM-START.
-func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
*event = yaml_event_t{
typ: yaml_STREAM_START_EVENT,
encoding: encoding,
}
- return true
}
// Create STREAM-END.
-func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
typ: yaml_STREAM_END_EVENT,
}
- return true
}
// Create DOCUMENT-START.
-func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
- tag_directives []yaml_tag_directive_t, implicit bool) bool {
+func yaml_document_start_event_initialize(
+ event *yaml_event_t,
+ version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t,
+ implicit bool,
+) {
*event = yaml_event_t{
typ: yaml_DOCUMENT_START_EVENT,
version_directive: version_directive,
tag_directives: tag_directives,
implicit: implicit,
}
- return true
}
// Create DOCUMENT-END.
-func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
*event = yaml_event_t{
typ: yaml_DOCUMENT_END_EVENT,
implicit: implicit,
}
- return true
}
///*
@@ -348,7 +347,7 @@ func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
}
// Create MAPPING-START.
-func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
*event = yaml_event_t{
typ: yaml_MAPPING_START_EVENT,
anchor: anchor,
@@ -356,15 +355,13 @@ func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte
implicit: implicit,
style: yaml_style_t(style),
}
- return true
}
// Create MAPPING-END.
-func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
typ: yaml_MAPPING_END_EVENT,
}
- return true
}
// Destroy an event object.
@@ -471,7 +468,7 @@ func yaml_event_delete(event *yaml_event_t) {
// } context
// tag_directive *yaml_tag_directive_t
//
-// context.error = YAML_NO_ERROR // Eliminate a compliler warning.
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
//
// assert(document) // Non-NULL document object is expected.
//
diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go
index 085cddc4..e4e56e28 100644
--- a/vendor/gopkg.in/yaml.v2/decode.go
+++ b/vendor/gopkg.in/yaml.v2/decode.go
@@ -4,6 +4,7 @@ import (
"encoding"
"encoding/base64"
"fmt"
+ "io"
"math"
"reflect"
"strconv"
@@ -22,19 +23,22 @@ type node struct {
kind int
line, column int
tag string
- value string
- implicit bool
- children []*node
- anchors map[string]*node
+ // For an alias node, alias holds the resolved alias.
+ alias *node
+ value string
+ implicit bool
+ children []*node
+ anchors map[string]*node
}
// ----------------------------------------------------------------------------
// Parser, produces a node tree out of a libyaml event stream.
type parser struct {
- parser yaml_parser_t
- event yaml_event_t
- doc *node
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *node
+ doneInit bool
}
func newParser(b []byte) *parser {
@@ -42,21 +46,30 @@ func newParser(b []byte) *parser {
if !yaml_parser_initialize(&p.parser) {
panic("failed to initialize YAML emitter")
}
-
if len(b) == 0 {
b = []byte{'\n'}
}
-
yaml_parser_set_input_string(&p.parser, b)
+ return &p
+}
- p.skip()
- if p.event.typ != yaml_STREAM_START_EVENT {
- panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
+func newParserFromReader(r io.Reader) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML emitter")
}
- p.skip()
+ yaml_parser_set_input_reader(&p.parser, r)
return &p
}
+func (p *parser) init() {
+ if p.doneInit {
+ return
+ }
+ p.expect(yaml_STREAM_START_EVENT)
+ p.doneInit = true
+}
+
func (p *parser) destroy() {
if p.event.typ != yaml_NO_EVENT {
yaml_event_delete(&p.event)
@@ -64,16 +77,35 @@ func (p *parser) destroy() {
yaml_parser_delete(&p.parser)
}
-func (p *parser) skip() {
- if p.event.typ != yaml_NO_EVENT {
- if p.event.typ == yaml_STREAM_END_EVENT {
- failf("attempted to go past the end of stream; corrupted value?")
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+ if p.event.typ == yaml_NO_EVENT {
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
}
- yaml_event_delete(&p.event)
+ }
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ if p.event.typ != e {
+ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+ p.fail()
+ }
+ yaml_event_delete(&p.event)
+ p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+ if p.event.typ != yaml_NO_EVENT {
+ return p.event.typ
}
if !yaml_parser_parse(&p.parser, &p.event) {
p.fail()
}
+ return p.event.typ
}
func (p *parser) fail() {
@@ -81,6 +113,10 @@ func (p *parser) fail() {
var line int
if p.parser.problem_mark.line != 0 {
line = p.parser.problem_mark.line
+ // Scanner errors don't increment the line count before returning the error.
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
} else if p.parser.context_mark.line != 0 {
line = p.parser.context_mark.line
}
@@ -103,7 +139,8 @@ func (p *parser) anchor(n *node, anchor []byte) {
}
func (p *parser) parse() *node {
- switch p.event.typ {
+ p.init()
+ switch p.peek() {
case yaml_SCALAR_EVENT:
return p.scalar()
case yaml_ALIAS_EVENT:
@@ -118,9 +155,8 @@ func (p *parser) parse() *node {
// Happens when attempting to decode an empty buffer.
return nil
default:
- panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
+ panic("attempted to parse unknown event: " + p.event.typ.String())
}
- panic("unreachable")
}
func (p *parser) node(kind int) *node {
@@ -135,19 +171,20 @@ func (p *parser) document() *node {
n := p.node(documentNode)
n.anchors = make(map[string]*node)
p.doc = n
- p.skip()
+ p.expect(yaml_DOCUMENT_START_EVENT)
n.children = append(n.children, p.parse())
- if p.event.typ != yaml_DOCUMENT_END_EVENT {
- panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
- }
- p.skip()
+ p.expect(yaml_DOCUMENT_END_EVENT)
return n
}
func (p *parser) alias() *node {
n := p.node(aliasNode)
n.value = string(p.event.anchor)
- p.skip()
+ n.alias = p.doc.anchors[n.value]
+ if n.alias == nil {
+ failf("unknown anchor '%s' referenced", n.value)
+ }
+ p.expect(yaml_ALIAS_EVENT)
return n
}
@@ -157,29 +194,29 @@ func (p *parser) scalar() *node {
n.tag = string(p.event.tag)
n.implicit = p.event.implicit
p.anchor(n, p.event.anchor)
- p.skip()
+ p.expect(yaml_SCALAR_EVENT)
return n
}
func (p *parser) sequence() *node {
n := p.node(sequenceNode)
p.anchor(n, p.event.anchor)
- p.skip()
- for p.event.typ != yaml_SEQUENCE_END_EVENT {
+ p.expect(yaml_SEQUENCE_START_EVENT)
+ for p.peek() != yaml_SEQUENCE_END_EVENT {
n.children = append(n.children, p.parse())
}
- p.skip()
+ p.expect(yaml_SEQUENCE_END_EVENT)
return n
}
func (p *parser) mapping() *node {
n := p.node(mappingNode)
p.anchor(n, p.event.anchor)
- p.skip()
- for p.event.typ != yaml_MAPPING_END_EVENT {
+ p.expect(yaml_MAPPING_START_EVENT)
+ for p.peek() != yaml_MAPPING_END_EVENT {
n.children = append(n.children, p.parse(), p.parse())
}
- p.skip()
+ p.expect(yaml_MAPPING_END_EVENT)
return n
}
@@ -188,9 +225,10 @@ func (p *parser) mapping() *node {
type decoder struct {
doc *node
- aliases map[string]bool
+ aliases map[*node]bool
mapType reflect.Type
terrors []string
+ strict bool
}
var (
@@ -198,11 +236,13 @@ var (
durationType = reflect.TypeOf(time.Duration(0))
defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
ifaceType = defaultMapType.Elem()
+ timeType = reflect.TypeOf(time.Time{})
+ ptrTimeType = reflect.TypeOf(&time.Time{})
)
-func newDecoder() *decoder {
- d := &decoder{mapType: defaultMapType}
- d.aliases = make(map[string]bool)
+func newDecoder(strict bool) *decoder {
+ d := &decoder{mapType: defaultMapType, strict: strict}
+ d.aliases = make(map[*node]bool)
return d
}
@@ -251,7 +291,7 @@ func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
- if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
+ if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
return out, false, false
}
again := true
@@ -308,16 +348,13 @@ func (d *decoder) document(n *node, out reflect.Value) (good bool) {
}
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
- an, ok := d.doc.anchors[n.value]
- if !ok {
- failf("unknown anchor '%s' referenced", n.value)
- }
- if d.aliases[n.value] {
+ if d.aliases[n] {
+ // TODO this could actually be allowed in some circumstances.
failf("anchor '%s' value contains itself", n.value)
}
- d.aliases[n.value] = true
- good = d.unmarshal(an, out)
- delete(d.aliases, n.value)
+ d.aliases[n] = true
+ good = d.unmarshal(n.alias, out)
+ delete(d.aliases, n)
return good
}
@@ -329,7 +366,7 @@ func resetMap(out reflect.Value) {
}
}
-func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
+func (d *decoder) scalar(n *node, out reflect.Value) bool {
var tag string
var resolved interface{}
if n.tag == "" && !n.implicit {
@@ -353,9 +390,26 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
}
return true
}
- if s, ok := resolved.(string); ok && out.CanAddr() {
- if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
- err := u.UnmarshalText([]byte(s))
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ // We've resolved to exactly the type we want, so use that.
+ out.Set(resolvedv)
+ return true
+ }
+ // Perhaps we can use the value as a TextUnmarshaler to
+ // set its value.
+ if out.CanAddr() {
+ u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+ if ok {
+ var text []byte
+ if tag == yaml_BINARY_TAG {
+ text = []byte(resolved.(string))
+ } else {
+ // We let any value be unmarshaled into TextUnmarshaler.
+ // That might be more lax than we'd like, but the
+ // TextUnmarshaler itself should bowl out any dubious values.
+ text = []byte(n.value)
+ }
+ err := u.UnmarshalText(text)
if err != nil {
fail(err)
}
@@ -366,46 +420,54 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
case reflect.String:
if tag == yaml_BINARY_TAG {
out.SetString(resolved.(string))
- good = true
- } else if resolved != nil {
+ return true
+ }
+ if resolved != nil {
out.SetString(n.value)
- good = true
+ return true
}
case reflect.Interface:
if resolved == nil {
out.Set(reflect.Zero(out.Type()))
+ } else if tag == yaml_TIMESTAMP_TAG {
+ // It looks like a timestamp but for backward compatibility
+ // reasons we set it as a string, so that code that unmarshals
+ // timestamp-like values into interface{} will continue to
+ // see a string and not a time.Time.
+ // TODO(v3) Drop this.
+ out.Set(reflect.ValueOf(n.value))
} else {
out.Set(reflect.ValueOf(resolved))
}
- good = true
+ return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch resolved := resolved.(type) {
case int:
if !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
- good = true
+ return true
}
case int64:
if !out.OverflowInt(resolved) {
out.SetInt(resolved)
- good = true
+ return true
}
case uint64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
- good = true
+ return true
}
case float64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
- good = true
+ return true
}
case string:
if out.Type() == durationType {
d, err := time.ParseDuration(resolved)
if err == nil {
out.SetInt(int64(d))
- good = true
+ return true
}
}
}
@@ -414,44 +476,49 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
case int:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
- good = true
+ return true
}
case int64:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
- good = true
+ return true
}
case uint64:
if !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
- good = true
+ return true
}
case float64:
if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
- good = true
+ return true
}
}
case reflect.Bool:
switch resolved := resolved.(type) {
case bool:
out.SetBool(resolved)
- good = true
+ return true
}
case reflect.Float32, reflect.Float64:
switch resolved := resolved.(type) {
case int:
out.SetFloat(float64(resolved))
- good = true
+ return true
case int64:
out.SetFloat(float64(resolved))
- good = true
+ return true
case uint64:
out.SetFloat(float64(resolved))
- good = true
+ return true
case float64:
out.SetFloat(resolved)
- good = true
+ return true
+ }
+ case reflect.Struct:
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ out.Set(resolvedv)
+ return true
}
case reflect.Ptr:
if out.Type().Elem() == reflect.TypeOf(resolved) {
@@ -459,13 +526,11 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
elem := reflect.New(out.Type().Elem())
elem.Elem().Set(reflect.ValueOf(resolved))
out.Set(elem)
- good = true
+ return true
}
}
- if !good {
- d.terror(n, tag, out)
- }
- return good
+ d.terror(n, tag, out)
+ return false
}
func settableValueOf(i interface{}) reflect.Value {
@@ -482,6 +547,10 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
switch out.Kind() {
case reflect.Slice:
out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Array:
+ if l != out.Len() {
+ failf("invalid array: want %d elements but got %d", out.Len(), l)
+ }
case reflect.Interface:
// No type hints. Will have to use a generic sequence.
iface = out
@@ -500,7 +569,9 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
j++
}
}
- out.Set(out.Slice(0, j))
+ if out.Kind() != reflect.Array {
+ out.Set(out.Slice(0, j))
+ }
if iface.IsValid() {
iface.Set(out)
}
@@ -561,7 +632,7 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
}
e := reflect.New(et).Elem()
if d.unmarshal(n.children[i+1], e) {
- out.SetMapIndex(k, e)
+ d.setMapIndex(n.children[i+1], out, k, e)
}
}
}
@@ -569,6 +640,14 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
return true
}
+func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
+ if d.strict && out.MapIndex(k) != zeroValue {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
+ return
+ }
+ out.SetMapIndex(k, v)
+}
+
func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
outt := out.Type()
if outt.Elem() != mapItemType {
@@ -616,6 +695,10 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
elemType = inlineMap.Type().Elem()
}
+ var doneFields []bool
+ if d.strict {
+ doneFields = make([]bool, len(sinfo.FieldsList))
+ }
for i := 0; i < l; i += 2 {
ni := n.children[i]
if isMerge(ni) {
@@ -626,6 +709,13 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
continue
}
if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ if d.strict {
+ if doneFields[info.Id] {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
+ continue
+ }
+ doneFields[info.Id] = true
+ }
var field reflect.Value
if info.Inline == nil {
field = out.Field(info.Num)
@@ -639,7 +729,9 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
}
value := reflect.New(elemType).Elem()
d.unmarshal(n.children[i+1], value)
- inlineMap.SetMapIndex(name, value)
+ d.setMapIndex(n.children[i+1], inlineMap, name, value)
+ } else if d.strict {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
}
}
return true
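The strict-mode checks added above (duplicate map keys, duplicate struct fields, and unknown fields) surface through UnmarshalStrict, which this diff adds to yaml.go further down. A minimal sketch of the behaviour, using a hypothetical struct type; the exact error text follows the fmt.Sprintf calls in mappingStruct:

    package main

    import (
        "fmt"

        yaml "gopkg.in/yaml.v2"
    )

    type T struct {
        A int `yaml:"a"`
    }

    func main() {
        in := []byte("a: 1\na: 2\n") // key "a" appears twice

        var t T
        // Plain Unmarshal keeps the last value and reports nothing.
        _ = yaml.Unmarshal(in, &t) // t.A == 2, err == nil

        // UnmarshalStrict routes through the doneFields check above and
        // returns a *TypeError, e.g.
        // "line 2: field a already set in type main.T".
        if err := yaml.UnmarshalStrict(in, &t); err != nil {
            fmt.Println(err)
        }
    }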
diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go
index 2befd553..a1c2cc52 100644
--- a/vendor/gopkg.in/yaml.v2/emitterc.go
+++ b/vendor/gopkg.in/yaml.v2/emitterc.go
@@ -2,6 +2,7 @@ package yaml
import (
"bytes"
+ "fmt"
)
// Flush the buffer if needed.
@@ -664,9 +665,8 @@ func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
return yaml_emitter_emit_mapping_start(emitter, event)
default:
return yaml_emitter_set_emitter_error(emitter,
- "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
+ fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
}
- return false
}
// Expect ALIAS.
@@ -843,7 +843,7 @@ func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event
return true
}
-// Write an achor.
+// Write an anchor.
func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
if emitter.anchor_data.anchor == nil {
return true
@@ -995,10 +995,10 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
break_space = false
space_break = false
- preceeded_by_whitespace = false
- followed_by_whitespace = false
- previous_space = false
- previous_break = false
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
)
emitter.scalar_data.value = value
@@ -1017,7 +1017,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
flow_indicators = true
}
- preceeded_by_whitespace = true
+ preceded_by_whitespace = true
for i, w := 0, 0; i < len(value); i += w {
w = width(value[i])
followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
@@ -1048,7 +1048,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
block_indicators = true
}
case '#':
- if preceeded_by_whitespace {
+ if preceded_by_whitespace {
flow_indicators = true
block_indicators = true
}
@@ -1089,7 +1089,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
}
// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
- preceeded_by_whitespace = is_blankz(value, i)
+ preceded_by_whitespace = is_blankz(value, i)
}
emitter.scalar_data.multiline = line_breaks
diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go
index 84f84995..a14435e8 100644
--- a/vendor/gopkg.in/yaml.v2/encode.go
+++ b/vendor/gopkg.in/yaml.v2/encode.go
@@ -3,12 +3,14 @@ package yaml
import (
"encoding"
"fmt"
+ "io"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
+ "unicode/utf8"
)
type encoder struct {
@@ -16,25 +18,39 @@ type encoder struct {
event yaml_event_t
out []byte
flow bool
+ // doneInit holds whether the initial stream_start_event has been
+ // emitted.
+ doneInit bool
}
-func newEncoder() (e *encoder) {
- e = &encoder{}
- e.must(yaml_emitter_initialize(&e.emitter))
+func newEncoder() *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
yaml_emitter_set_output_string(&e.emitter, &e.out)
yaml_emitter_set_unicode(&e.emitter, true)
- e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
- e.emit()
- e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
- e.emit()
return e
}
-func (e *encoder) finish() {
- e.must(yaml_document_end_event_initialize(&e.event, true))
+func newEncoderWithWriter(w io.Writer) *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_writer(&e.emitter, w)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func (e *encoder) init() {
+ if e.doneInit {
+ return
+ }
+ yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
e.emit()
+ e.doneInit = true
+}
+
+func (e *encoder) finish() {
e.emitter.open_ended = false
- e.must(yaml_stream_end_event_initialize(&e.event))
+ yaml_stream_end_event_initialize(&e.event)
e.emit()
}
@@ -44,9 +60,7 @@ func (e *encoder) destroy() {
func (e *encoder) emit() {
// This will internally delete the e.event value.
- if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
- e.must(false)
- }
+ e.must(yaml_emitter_emit(&e.emitter, &e.event))
}
func (e *encoder) must(ok bool) {
@@ -59,13 +73,28 @@ func (e *encoder) must(ok bool) {
}
}
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+ e.init()
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.emit()
+ e.marshal(tag, in)
+ yaml_document_end_event_initialize(&e.event, true)
+ e.emit()
+}
+
func (e *encoder) marshal(tag string, in reflect.Value) {
- if !in.IsValid() {
+ if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
e.nilv()
return
}
iface := in.Interface()
- if m, ok := iface.(Marshaler); ok {
+ switch m := iface.(type) {
+ case time.Time, *time.Time:
+ // Although time.Time implements TextMarshaler,
+ // we don't want to treat it as a string for YAML
+ // purposes because YAML has special support for
+ // timestamps.
+ case Marshaler:
v, err := m.MarshalYAML()
if err != nil {
fail(err)
@@ -75,31 +104,34 @@ func (e *encoder) marshal(tag string, in reflect.Value) {
return
}
in = reflect.ValueOf(v)
- } else if m, ok := iface.(encoding.TextMarshaler); ok {
+ case encoding.TextMarshaler:
text, err := m.MarshalText()
if err != nil {
fail(err)
}
in = reflect.ValueOf(string(text))
+ case nil:
+ e.nilv()
+ return
}
switch in.Kind() {
case reflect.Interface:
- if in.IsNil() {
- e.nilv()
- } else {
- e.marshal(tag, in.Elem())
- }
+ e.marshal(tag, in.Elem())
case reflect.Map:
e.mapv(tag, in)
case reflect.Ptr:
- if in.IsNil() {
- e.nilv()
+ if in.Type() == ptrTimeType {
+ e.timev(tag, in.Elem())
} else {
e.marshal(tag, in.Elem())
}
case reflect.Struct:
- e.structv(tag, in)
- case reflect.Slice:
+ if in.Type() == timeType {
+ e.timev(tag, in)
+ } else {
+ e.structv(tag, in)
+ }
+ case reflect.Slice, reflect.Array:
if in.Type().Elem() == mapItemType {
e.itemsv(tag, in)
} else {
@@ -191,10 +223,10 @@ func (e *encoder) mappingv(tag string, f func()) {
e.flow = false
style = yaml_FLOW_MAPPING_STYLE
}
- e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
e.emit()
f()
- e.must(yaml_mapping_end_event_initialize(&e.event))
+ yaml_mapping_end_event_initialize(&e.event)
e.emit()
}
@@ -240,23 +272,36 @@ var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0
func (e *encoder) stringv(tag string, in reflect.Value) {
var style yaml_scalar_style_t
s := in.String()
- rtag, rs := resolve("", s)
- if rtag == yaml_BINARY_TAG {
- if tag == "" || tag == yaml_STR_TAG {
- tag = rtag
- s = rs.(string)
- } else if tag == yaml_BINARY_TAG {
+ canUsePlain := true
+ switch {
+ case !utf8.ValidString(s):
+ if tag == yaml_BINARY_TAG {
failf("explicitly tagged !!binary data must be base64-encoded")
- } else {
+ }
+ if tag != "" {
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
}
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = yaml_BINARY_TAG
+ s = encodeBase64(s)
+ case tag == "":
+ // Check to see if it would resolve to a specific
+ // tag when encoded unquoted. If it doesn't,
+ // there's no need to quote it.
+ rtag, _ := resolve("", s)
+ canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
}
- if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- } else if strings.Contains(s, "\n") {
+ // Note: it's possible for user code to emit invalid YAML
+ // if it explicitly specifies a tag and a string containing
+ // text that's incompatible with that tag.
+ switch {
+ case strings.Contains(s, "\n"):
style = yaml_LITERAL_SCALAR_STYLE
- } else {
+ case canUsePlain:
style = yaml_PLAIN_SCALAR_STYLE
+ default:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
}
e.emitScalar(s, "", tag, style)
}
@@ -281,9 +326,20 @@ func (e *encoder) uintv(tag string, in reflect.Value) {
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
+func (e *encoder) timev(tag string, in reflect.Value) {
+ t := in.Interface().(time.Time)
+ s := t.Format(time.RFC3339Nano)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
func (e *encoder) floatv(tag string, in reflect.Value) {
- // FIXME: Handle 64 bits here.
- s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
+ // Issue #352: When formatting, use the precision of the underlying value
+ precision := 64
+ if in.Kind() == reflect.Float32 {
+ precision = 32
+ }
+
+ s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
switch s {
case "+Inf":
s = ".inf"
diff --git a/vendor/gopkg.in/yaml.v2/go.mod b/vendor/gopkg.in/yaml.v2/go.mod
new file mode 100644
index 00000000..1934e876
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/go.mod
@@ -0,0 +1,5 @@
+module "gopkg.in/yaml.v2"
+
+require (
+ "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
+)
diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go
index 0a7037ad..81d05dfe 100644
--- a/vendor/gopkg.in/yaml.v2/parserc.go
+++ b/vendor/gopkg.in/yaml.v2/parserc.go
@@ -166,7 +166,6 @@ func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool
default:
panic("invalid parser state")
}
- return false
}
// Parse the production:
diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go
index f4507917..7c1f5fac 100644
--- a/vendor/gopkg.in/yaml.v2/readerc.go
+++ b/vendor/gopkg.in/yaml.v2/readerc.go
@@ -93,9 +93,18 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
panic("read handler must be set")
}
+ // [Go] This function was changed to guarantee that the requested length is
+ // available in the buffer at EOF. The fact we need to do this is pretty awful,
+ // but the description above implies it should be the case, and there are tests
+ // that rely on it.
+
// If the EOF flag is set and the raw buffer is empty, do nothing.
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
- return true
+ // [Go] ACTUALLY! Read the documentation of this function above.
+ // This is just broken. To return true, we need to have the
+ // given length in the buffer. Not doing that means every single
+ // check that calls this function to make sure the buffer has a
+ // given length is either a) panicking, or b) accessing invalid memory.
+ //return true
}
// Return if the buffer contains enough characters.
@@ -389,6 +398,15 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
break
}
}
+ // [Go] Read the documentation of this function above. To return true,
+ // we need to have the given length in the buffer. Not doing that means
+ // every single check that calls this function to make sure the buffer
+ // has a given length is either a) panicking, or b) accessing invalid memory.
+ // This happens here due to the EOF above breaking early.
+ for buffer_len < length {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ }
parser.buffer = parser.buffer[:buffer_len]
return true
}
diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go
index 93a86327..6c151db6 100644
--- a/vendor/gopkg.in/yaml.v2/resolve.go
+++ b/vendor/gopkg.in/yaml.v2/resolve.go
@@ -3,9 +3,10 @@ package yaml
import (
"encoding/base64"
"math"
+ "regexp"
"strconv"
"strings"
- "unicode/utf8"
+ "time"
)
type resolveMapItem struct {
@@ -74,12 +75,14 @@ func longTag(tag string) string {
func resolvableTag(tag string) bool {
switch tag {
- case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
+ case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
return true
}
return false
}
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
+
func resolve(tag string, in string) (rtag string, out interface{}) {
if !resolvableTag(tag) {
return tag, in
@@ -89,6 +92,19 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
switch tag {
case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
return
+ case yaml_FLOAT_TAG:
+ if rtag == yaml_INT_TAG {
+ switch v := out.(type) {
+ case int64:
+ rtag = yaml_FLOAT_TAG
+ out = float64(v)
+ return
+ case int:
+ rtag = yaml_FLOAT_TAG
+ out = float64(v)
+ return
+ }
+ }
}
failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
}()
@@ -122,6 +138,15 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
case 'D', 'S':
// Int, float, or timestamp.
+ // Only try values as a timestamp if the value is unquoted or there's an explicit
+ // !!timestamp tag.
+ if tag == "" || tag == yaml_TIMESTAMP_TAG {
+ t, ok := parseTimestamp(in)
+ if ok {
+ return yaml_TIMESTAMP_TAG, t
+ }
+ }
+
plain := strings.Replace(in, "_", "", -1)
intv, err := strconv.ParseInt(plain, 0, 64)
if err == nil {
@@ -135,9 +160,11 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
if err == nil {
return yaml_INT_TAG, uintv
}
- floatv, err := strconv.ParseFloat(plain, 64)
- if err == nil {
- return yaml_FLOAT_TAG, floatv
+ if yamlStyleFloat.MatchString(plain) {
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
}
if strings.HasPrefix(plain, "0b") {
intv, err := strconv.ParseInt(plain[2:], 2, 64)
@@ -153,28 +180,20 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
return yaml_INT_TAG, uintv
}
} else if strings.HasPrefix(plain, "-0b") {
- intv, err := strconv.ParseInt(plain[3:], 2, 64)
+ intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
if err == nil {
- if intv == int64(int(intv)) {
- return yaml_INT_TAG, -int(intv)
+ if true || intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
} else {
- return yaml_INT_TAG, -intv
+ return yaml_INT_TAG, intv
}
}
}
- // XXX Handle timestamps here.
-
default:
panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
}
}
- if tag == yaml_BINARY_TAG {
- return yaml_BINARY_TAG, in
- }
- if utf8.ValidString(in) {
- return yaml_STR_TAG, in
- }
- return yaml_BINARY_TAG, encodeBase64(in)
+ return yaml_STR_TAG, in
}
// encodeBase64 encodes s as base64 that is broken up into multiple lines
@@ -201,3 +220,39 @@ func encodeBase64(s string) string {
}
return string(out[:k])
}
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+ "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
+ "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+ "2006-1-2 15:4:5.999999999", // space separated with no time zone
+ "2006-1-2", // date only
+ // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+ // from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+ // TODO write code to check all the formats supported by
+ // http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+ // Quick check: all date formats start with YYYY-.
+ i := 0
+ for ; i < len(s); i++ {
+ if c := s[i]; c < '0' || c > '9' {
+ break
+ }
+ }
+ if i != 4 || i == len(s) || s[i] != '-' {
+ return time.Time{}, false
+ }
+ for _, format := range allowedTimestampFormats {
+ if t, err := time.Parse(format, s); err == nil {
+ return t, true
+ }
+ }
+ return time.Time{}, false
+}
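With parseTimestamp wired into resolve, unquoted timestamp-like scalars now decode straight into time.Time fields, while interface{} targets keep receiving a string for backward compatibility (see the yaml_TIMESTAMP_TAG branch added to decode.go above). A minimal sketch with a hypothetical struct:

    package main

    import (
        "fmt"
        "time"

        yaml "gopkg.in/yaml.v2"
    )

    type Event struct {
        When time.Time `yaml:"when"`
    }

    func main() {
        var e Event
        if err := yaml.Unmarshal([]byte("when: 2001-12-14"), &e); err != nil {
            panic(err)
        }
        fmt.Println(e.When.Year()) // 2001

        // Into interface{} the same scalar stays a string (backward compat).
        var any map[string]interface{}
        _ = yaml.Unmarshal([]byte("when: 2001-12-14"), &any)
        fmt.Printf("%T\n", any["when"]) // string
    }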
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
index 25808000..077fd1dd 100644
--- a/vendor/gopkg.in/yaml.v2/scannerc.go
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -9,7 +9,7 @@ import (
// ************
//
// The following notes assume that you are familiar with the YAML specification
-// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
// some cases we are less restrictive than it requires.
//
// The process of transforming a YAML stream into a sequence of events is
@@ -611,7 +611,7 @@ func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, co
if directive {
context = "while parsing a %TAG directive"
}
- return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet")
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
}
func trace(args ...interface{}) func() {
@@ -871,12 +871,6 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
required := parser.flow_level == 0 && parser.indent == parser.mark.column
- // A simple key is required only when it is the first token in the current
- // line. Therefore it is always allowed. But we add a check anyway.
- if required && !parser.simple_key_allowed {
- panic("should not happen")
- }
-
//
// If the current position may start a simple key, save it.
//
@@ -1944,7 +1938,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma
} else {
// It's either the '!' tag or not really a tag handle. If it's a %TAG
// directive, it's an error. If it's a tag token, it must be a part of URI.
- if directive && !(s[0] == '!' && s[1] == 0) {
+ if directive && string(s) != "!" {
yaml_parser_set_scanner_tag_error(parser, directive,
start_mark, "did not find expected '!'")
return false
@@ -1959,6 +1953,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma
func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
//size_t length = head ? strlen((char *)head) : 0
var s []byte
+ hasTag := len(head) > 0
// Copy the head if needed.
//
@@ -2000,10 +1995,10 @@ func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
return false
}
+ hasTag = true
}
- // Check if the tag is non-empty.
- if len(s) == 0 {
+ if !hasTag {
yaml_parser_set_scanner_tag_error(parser, directive,
start_mark, "did not find expected tag URI")
return false
@@ -2474,6 +2469,10 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si
}
}
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
// Check if we are at the end of the scalar.
if single {
if parser.buffer[parser.buffer_pos] == '\'' {
@@ -2486,10 +2485,6 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si
}
// Consume blank characters.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
if is_blank(parser.buffer, parser.buffer_pos) {
// Consume a space or a tab character.
@@ -2591,19 +2586,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b
// Consume non-blank characters.
for !is_blankz(parser.buffer, parser.buffer_pos) {
- // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
- if parser.flow_level > 0 &&
- parser.buffer[parser.buffer_pos] == ':' &&
- !is_blankz(parser.buffer, parser.buffer_pos+1) {
- yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
- start_mark, "found unexpected ':'")
- return false
- }
-
// Check for indicators that may end a plain scalar.
if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
(parser.flow_level > 0 &&
- (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
+ (parser.buffer[parser.buffer_pos] == ',' ||
parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
parser.buffer[parser.buffer_pos] == '}')) {
@@ -2655,10 +2641,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b
for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
if is_blank(parser.buffer, parser.buffer_pos) {
- // Check for tab character that abuse indentation.
+ // Check for tab characters that abuse indentation.
if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
- start_mark, "found a tab character that violate indentation")
+ start_mark, "found a tab character that violates indentation")
return false
}
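Dropping the 'x:x' check above, together with removing ':' from the flow-indicator list, means a colon followed by a non-blank character no longer terminates or rejects a plain scalar in flow context. A small sketch of input that previously failed with "found unexpected ':'":

    package main

    import (
        "fmt"

        yaml "gopkg.in/yaml.v2"
    )

    func main() {
        var m map[string][]string
        // The colons in the URL sit inside a plain scalar in flow
        // context; they now scan as part of the scalar.
        err := yaml.Unmarshal([]byte("urls: [http://example.com]"), &m)
        fmt.Println(m, err) // map[urls:[http://example.com]] <nil>
    }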
diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go
index 5958822f..4c45e660 100644
--- a/vendor/gopkg.in/yaml.v2/sorter.go
+++ b/vendor/gopkg.in/yaml.v2/sorter.go
@@ -51,6 +51,15 @@ func (l keyList) Less(i, j int) bool {
}
var ai, bi int
var an, bn int64
+ if ar[i] == '0' || br[i] == '0' {
+ for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+ if ar[j] != '0' {
+ an = 1
+ bn = 1
+ break
+ }
+ }
+ }
for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
an = an*10 + int64(ar[ai]-'0')
}
diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go
index 190362f2..a2dde608 100644
--- a/vendor/gopkg.in/yaml.v2/writerc.go
+++ b/vendor/gopkg.in/yaml.v2/writerc.go
@@ -18,72 +18,9 @@ func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
return true
}
- // If the output encoding is UTF-8, we don't need to recode the buffer.
- if emitter.encoding == yaml_UTF8_ENCODING {
- if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
- return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
- }
- emitter.buffer_pos = 0
- return true
- }
-
- // Recode the buffer into the raw buffer.
- var low, high int
- if emitter.encoding == yaml_UTF16LE_ENCODING {
- low, high = 0, 1
- } else {
- high, low = 1, 0
- }
-
- pos := 0
- for pos < emitter.buffer_pos {
- // See the "reader.c" code for more details on UTF-8 encoding. Note
- // that we assume that the buffer contains a valid UTF-8 sequence.
-
- // Read the next UTF-8 character.
- octet := emitter.buffer[pos]
-
- var w int
- var value rune
- switch {
- case octet&0x80 == 0x00:
- w, value = 1, rune(octet&0x7F)
- case octet&0xE0 == 0xC0:
- w, value = 2, rune(octet&0x1F)
- case octet&0xF0 == 0xE0:
- w, value = 3, rune(octet&0x0F)
- case octet&0xF8 == 0xF0:
- w, value = 4, rune(octet&0x07)
- }
- for k := 1; k < w; k++ {
- octet = emitter.buffer[pos+k]
- value = (value << 6) + (rune(octet) & 0x3F)
- }
- pos += w
-
- // Write the character.
- if value < 0x10000 {
- var b [2]byte
- b[high] = byte(value >> 8)
- b[low] = byte(value & 0xFF)
- emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
- } else {
- // Write the character using a surrogate pair (check "reader.c").
- var b [4]byte
- value -= 0x10000
- b[high] = byte(0xD8 + (value >> 18))
- b[low] = byte((value >> 10) & 0xFF)
- b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
- b[low+2] = byte(value & 0xFF)
- emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
- }
- }
-
- // Write the raw buffer.
- if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
- emitter.raw_buffer = emitter.raw_buffer[:0]
return true
}
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
index 36d6b883..de85aa4c 100644
--- a/vendor/gopkg.in/yaml.v2/yaml.go
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -9,6 +9,7 @@ package yaml
import (
"errors"
"fmt"
+ "io"
"reflect"
"strings"
"sync"
@@ -77,8 +78,65 @@ type Marshaler interface {
// supported tag options.
//
func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members, or mapping
+// keys that are duplicates, will result in an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, true)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+ strict bool
+ parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ parser: newParserFromReader(r),
+ }
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+ dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+ d := newDecoder(dec.strict)
+ defer handleErr(&err)
+ node := dec.parser.parse()
+ if node == nil {
+ return io.EOF
+ }
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(node, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
defer handleErr(&err)
- d := newDecoder()
+ d := newDecoder(strict)
p := newParser(in)
defer p.destroy()
node := p.parse()
@@ -99,8 +157,8 @@ func Unmarshal(in []byte, out interface{}) (err error) {
// of the generated document will reflect the structure of the value itself.
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
//
-// Struct fields are only unmarshalled if they are exported (have an upper case
-// first letter), and are unmarshalled using the field name lowercased as the
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
// default key. Custom keys may be defined via the "yaml" name in the field
// tag: the content preceding the first comma is used as the key, and the
// following comma-separated options are used to tweak the marshalling process.
@@ -114,7 +172,10 @@ func Unmarshal(in []byte, out interface{}) (err error) {
//
// omitempty Only include the field if it's not set to the zero
// value for the type or to empty slices or maps.
-// Does not apply to zero valued structs.
+// Zero valued structs will be omitted if all their public
+// fields are zero, unless they implement an IsZero
+// method (see the IsZeroer interface type), in which
+// case the field will be included if that method returns true.
//
// flow Marshal using a flow style (useful for structs,
// sequences and maps).
@@ -129,7 +190,7 @@ func Unmarshal(in []byte, out interface{}) (err error) {
// For example:
//
// type T struct {
-// F int "a,omitempty"
+// F int `yaml:"a,omitempty"`
// B int
// }
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
@@ -139,12 +200,47 @@ func Marshal(in interface{}) (out []byte, err error) {
defer handleErr(&err)
e := newEncoder()
defer e.destroy()
- e.marshal("", reflect.ValueOf(in))
+ e.marshalDoc("", reflect.ValueOf(in))
e.finish()
out = e.out
return
}
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+ encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ encoder: newEncoderWithWriter(w),
+ }
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e.encoder.marshalDoc("", reflect.ValueOf(v))
+ return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+ defer handleErr(&err)
+ e.encoder.finish()
+ return nil
+}
+
func handleErr(err *error) {
if v := recover(); v != nil {
if e, ok := v.(yamlError); ok {
@@ -200,6 +296,9 @@ type fieldInfo struct {
Num int
OmitEmpty bool
Flow bool
+ // Id holds the unique field identifier, so we can cheaply
+ // check for field duplicates without maintaining an extra map.
+ Id int
// Inline holds the field index if the field is part of an inlined struct.
Inline []int
@@ -279,6 +378,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
} else {
finfo.Inline = append([]int{i}, finfo.Inline...)
}
+ finfo.Id = len(fieldsList)
fieldsMap[finfo.Key] = finfo
fieldsList = append(fieldsList, finfo)
}
@@ -300,11 +400,16 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
return nil, errors.New(msg)
}
+ info.Id = len(fieldsList)
fieldsList = append(fieldsList, info)
fieldsMap[info.Key] = info
}
- sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
+ sinfo = &structInfo{
+ FieldsMap: fieldsMap,
+ FieldsList: fieldsList,
+ InlineMap: inlineMap,
+ }
fieldMapMutex.Lock()
structMap[st] = sinfo
@@ -312,8 +417,23 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
return sinfo, nil
}
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+ IsZero() bool
+}
+
func isZero(v reflect.Value) bool {
- switch v.Kind() {
+ kind := v.Kind()
+ if z, ok := v.Interface().(IsZeroer); ok {
+ if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+ return true
+ }
+ return z.IsZero()
+ }
+ switch kind {
case reflect.String:
return len(v.String()) == 0
case reflect.Interface, reflect.Ptr:
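The new Encoder and Decoder types above turn the one-shot Marshal/Unmarshal calls into a streaming API. A minimal round-trip sketch over a multi-document stream:

    package main

    import (
        "bytes"
        "fmt"
        "io"

        yaml "gopkg.in/yaml.v2"
    )

    func main() {
        var buf bytes.Buffer
        enc := yaml.NewEncoder(&buf)
        _ = enc.Encode(map[string]int{"a": 1})
        _ = enc.Encode(map[string]int{"b": 2}) // preceded by "---"
        _ = enc.Close()

        dec := yaml.NewDecoder(&buf)
        dec.SetStrict(true) // same checks as UnmarshalStrict
        for {
            var doc map[string]int
            err := dec.Decode(&doc)
            if err == io.EOF {
                break
            }
            if err != nil {
                panic(err)
            }
            fmt.Println(doc) // map[a:1], then map[b:2]
        }
    }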
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go
index d60a6b6b..e25cee56 100644
--- a/vendor/gopkg.in/yaml.v2/yamlh.go
+++ b/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -1,6 +1,7 @@
package yaml
import (
+ "fmt"
"io"
)
@@ -239,6 +240,27 @@ const (
yaml_MAPPING_END_EVENT // A MAPPING-END event.
)
+var eventStrings = []string{
+ yaml_NO_EVENT: "none",
+ yaml_STREAM_START_EVENT: "stream start",
+ yaml_STREAM_END_EVENT: "stream end",
+ yaml_DOCUMENT_START_EVENT: "document start",
+ yaml_DOCUMENT_END_EVENT: "document end",
+ yaml_ALIAS_EVENT: "alias",
+ yaml_SCALAR_EVENT: "scalar",
+ yaml_SEQUENCE_START_EVENT: "sequence start",
+ yaml_SEQUENCE_END_EVENT: "sequence end",
+ yaml_MAPPING_START_EVENT: "mapping start",
+ yaml_MAPPING_END_EVENT: "mapping end",
+}
+
+func (e yaml_event_type_t) String() string {
+ if e < 0 || int(e) >= len(eventStrings) {
+ return fmt.Sprintf("unknown event %d", e)
+ }
+ return eventStrings[e]
+}
+
// The event structure.
type yaml_event_t struct {
@@ -508,7 +530,7 @@ type yaml_parser_t struct {
problem string // Error description.
- // The byte about which the problem occured.
+ // The byte about which the problem occurred.
problem_offset int
problem_value int
problem_mark yaml_mark_t
@@ -521,9 +543,9 @@ type yaml_parser_t struct {
read_handler yaml_read_handler_t // Read handler.
- input_file io.Reader // File input data.
- input []byte // String input data.
- input_pos int
+ input_reader io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
eof bool // EOF flag
@@ -632,7 +654,7 @@ type yaml_emitter_t struct {
write_handler yaml_write_handler_t // Write handler.
output_buffer *[]byte // String output data.
- output_file io.Writer // File output data.
+ output_writer io.Writer // File output data.
buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.