author    Wim <wim@42.be>                2023-01-28 22:57:53 +0100
committer GitHub <noreply@github.com>    2023-01-28 22:57:53 +0100
commit    880586bac42817ffcfea5d9f746f503fa29915b8 (patch)
tree      a89374cba6f88975f12316ec8d1b8aa1d4c6ba79 /vendor/go.uber.org/zap/zapcore
parent    eac2a8c8dc831f946970d327e2a80b26b0684255 (diff)
Update dependencies (#1951)
Diffstat (limited to 'vendor/go.uber.org/zap/zapcore')
-rw-r--r--  vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go  188
-rw-r--r--  vendor/go.uber.org/zap/zapcore/clock.go                    48
-rw-r--r--  vendor/go.uber.org/zap/zapcore/console_encoder.go           6
-rw-r--r--  vendor/go.uber.org/zap/zapcore/encoder.go                  21
-rw-r--r--  vendor/go.uber.org/zap/zapcore/entry.go                    10
-rw-r--r--  vendor/go.uber.org/zap/zapcore/error.go                     2
-rw-r--r--  vendor/go.uber.org/zap/zapcore/json_encoder.go            104
-rw-r--r--  vendor/go.uber.org/zap/zapcore/level.go                    12
-rw-r--r--  vendor/go.uber.org/zap/zapcore/reflected_encoder.go        41
-rw-r--r--  vendor/go.uber.org/zap/zapcore/sampler.go                  27
10 files changed, 393 insertions, 66 deletions
diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
new file mode 100644
index 00000000..ef2f7d96
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
@@ -0,0 +1,188 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "bufio"
+ "sync"
+ "time"
+
+ "go.uber.org/multierr"
+)
+
+const (
+ // _defaultBufferSize specifies the default size used by Buffer.
+ _defaultBufferSize = 256 * 1024 // 256 kB
+
+ // _defaultFlushInterval specifies the default flush interval for
+ // Buffer.
+ _defaultFlushInterval = 30 * time.Second
+)
+
+// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before
+// flushing them to a wrapped WriteSyncer after reaching some limit, or at some
+// fixed interval--whichever comes first.
+//
+// BufferedWriteSyncer is safe for concurrent use. You don't need to use
+// zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
+type BufferedWriteSyncer struct {
+ // WS is the WriteSyncer around which BufferedWriteSyncer will buffer
+ // writes.
+ //
+ // This field is required.
+ WS WriteSyncer
+
+ // Size specifies the maximum amount of data the writer will buffer
+ // before flushing.
+ //
+ // Defaults to 256 kB if unspecified.
+ Size int
+
+ // FlushInterval specifies how often the writer should flush data if
+ // there have been no writes.
+ //
+ // Defaults to 30 seconds if unspecified.
+ FlushInterval time.Duration
+
+ // Clock, if specified, provides control of the source of time for the
+ // writer.
+ //
+ // Defaults to the system clock.
+ Clock Clock
+
+ // unexported fields for state
+ mu sync.Mutex
+ initialized bool // whether initialize() has run
+ stopped bool // whether Stop() has run
+ writer *bufio.Writer
+ ticker *time.Ticker
+ stop chan struct{} // closed when flushLoop should stop
+ done chan struct{} // closed when flushLoop has stopped
+}
+
+func (s *BufferedWriteSyncer) initialize() {
+ size := s.Size
+ if size == 0 {
+ size = _defaultBufferSize
+ }
+
+ flushInterval := s.FlushInterval
+ if flushInterval == 0 {
+ flushInterval = _defaultFlushInterval
+ }
+
+ if s.Clock == nil {
+ s.Clock = DefaultClock
+ }
+
+ s.ticker = s.Clock.NewTicker(flushInterval)
+ s.writer = bufio.NewWriterSize(s.WS, size)
+ s.stop = make(chan struct{})
+ s.done = make(chan struct{})
+ s.initialized = true
+ go s.flushLoop()
+}
+
+// Write writes log data into the buffer. Multiple Write calls are batched, and
+// the buffered data is flushed to the wrapped WriteSyncer when the buffer fills
+// up or at the flush interval, whichever comes first.
+func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if !s.initialized {
+ s.initialize()
+ }
+
+ // To avoid flushing a partial write, we manually flush the existing buffer if:
+ // * The current write doesn't fit into the buffer fully, and
+ // * The buffer is not empty (since bufio will not split large writes when the buffer is empty)
+ if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 {
+ if err := s.writer.Flush(); err != nil {
+ return 0, err
+ }
+ }
+
+ return s.writer.Write(bs)
+}
+
+// Sync flushes any buffered log data to the wrapped WriteSyncer and syncs it.
+func (s *BufferedWriteSyncer) Sync() error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ var err error
+ if s.initialized {
+ err = s.writer.Flush()
+ }
+
+ return multierr.Append(err, s.WS.Sync())
+}
+
+// flushLoop flushes the buffer at the configured interval until Stop is
+// called.
+func (s *BufferedWriteSyncer) flushLoop() {
+ defer close(s.done)
+
+ for {
+ select {
+ case <-s.ticker.C:
+ // We ignore the error here: the underlying bufio.Writer stores any
+ // write error, and it is returned from Sync() when the syncer is
+ // stopped.
+ _ = s.Sync()
+ case <-s.stop:
+ return
+ }
+ }
+}
+
+// Stop closes the buffer, cleans up background goroutines, and flushes
+// remaining unwritten data.
+func (s *BufferedWriteSyncer) Stop() (err error) {
+ var stopped bool
+
+ // Critical section.
+ func() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if !s.initialized {
+ return
+ }
+
+ stopped = s.stopped
+ if stopped {
+ return
+ }
+ s.stopped = true
+
+ s.ticker.Stop()
+ close(s.stop) // tell flushLoop to stop
+ <-s.done // and wait until it has
+ }()
+
+ // Don't call Sync on consecutive Stops.
+ if !stopped {
+ err = s.Sync()
+ }
+
+ return err
+}
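
The new BufferedWriteSyncer is wired in by wrapping an existing WriteSyncer before building a Core. A minimal sketch, not part of this change: the file path, buffer size, and flush interval are illustrative, and it assumes the parent package's zap.New and zap.NewProductionEncoderConfig helpers.

package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	f, err := os.Create("/tmp/app.log") // illustrative destination
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Buffer writes to the file; flush at 64 kB or every 10 seconds,
	// whichever comes first. Stop flushes whatever is still buffered.
	ws := &zapcore.BufferedWriteSyncer{
		WS:            zapcore.AddSync(f),
		Size:          64 * 1024,
		FlushInterval: 10 * time.Second,
	}
	defer ws.Stop()

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		ws,
		zapcore.InfoLevel,
	)
	logger := zap.New(core)
	logger.Info("buffered write", zap.Int("attempt", 1))
}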
diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go
new file mode 100644
index 00000000..422fd82a
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/clock.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "time"
+
+// DefaultClock is the default clock used by Zap in operations that require
+// time. This clock uses the system clock for all operations.
+var DefaultClock = systemClock{}
+
+// Clock is a source of time for logged entries.
+type Clock interface {
+ // Now returns the current local time.
+ Now() time.Time
+
+ // NewTicker returns a *time.Ticker that holds a channel
+ // that delivers ticks of a clock.
+ NewTicker(time.Duration) *time.Ticker
+}
+
+// systemClock implements the default Clock, which uses the system time.
+type systemClock struct{}
+
+func (systemClock) Now() time.Time {
+ return time.Now()
+}
+
+func (systemClock) NewTicker(duration time.Duration) *time.Ticker {
+ return time.NewTicker(duration)
+}
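
Clock is the seam that BufferedWriteSyncer (above) and the sampler use as their time source. A hedged sketch of a test clock satisfying this interface; "time" and "go.uber.org/zap" imports are elided, and the zap.WithClock option from the parent package is assumed for wiring it into a logger.

// constantClock is a hypothetical Clock for tests: Now always reports the
// same instant, while NewTicker simply defers to the real time package.
type constantClock time.Time

func (c constantClock) Now() time.Time { return time.Time(c) }

func (c constantClock) NewTicker(d time.Duration) *time.Ticker {
	return time.NewTicker(d)
}

// Example wiring (core is any zapcore.Core):
//   clock := constantClock(time.Date(2023, 1, 28, 22, 57, 53, 0, time.UTC))
//   logger := zap.New(core, zap.WithClock(clock))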
diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go
index 2307af40..1aa5dc36 100644
--- a/vendor/go.uber.org/zap/zapcore/console_encoder.go
+++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go
@@ -125,11 +125,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
line.AppendString(ent.Stack)
}
- if c.LineEnding != "" {
- line.AppendString(c.LineEnding)
- } else {
- line.AppendString(DefaultLineEnding)
- }
+ line.AppendString(c.LineEnding)
return line, nil
}
diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go
index 6601ca16..6e5fd565 100644
--- a/vendor/go.uber.org/zap/zapcore/encoder.go
+++ b/vendor/go.uber.org/zap/zapcore/encoder.go
@@ -22,6 +22,7 @@ package zapcore
import (
"encoding/json"
+ "io"
"time"
"go.uber.org/zap/buffer"
@@ -312,14 +313,15 @@ func (e *NameEncoder) UnmarshalText(text []byte) error {
type EncoderConfig struct {
// Set the keys used for each log entry. If any key is empty, that portion
// of the entry is omitted.
- MessageKey string `json:"messageKey" yaml:"messageKey"`
- LevelKey string `json:"levelKey" yaml:"levelKey"`
- TimeKey string `json:"timeKey" yaml:"timeKey"`
- NameKey string `json:"nameKey" yaml:"nameKey"`
- CallerKey string `json:"callerKey" yaml:"callerKey"`
- FunctionKey string `json:"functionKey" yaml:"functionKey"`
- StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
- LineEnding string `json:"lineEnding" yaml:"lineEnding"`
+ MessageKey string `json:"messageKey" yaml:"messageKey"`
+ LevelKey string `json:"levelKey" yaml:"levelKey"`
+ TimeKey string `json:"timeKey" yaml:"timeKey"`
+ NameKey string `json:"nameKey" yaml:"nameKey"`
+ CallerKey string `json:"callerKey" yaml:"callerKey"`
+ FunctionKey string `json:"functionKey" yaml:"functionKey"`
+ StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
+ SkipLineEnding bool `json:"skipLineEnding" yaml:"skipLineEnding"`
+ LineEnding string `json:"lineEnding" yaml:"lineEnding"`
// Configure the primitive representations of common complex types. For
// example, some users may want all time.Times serialized as floating-point
// seconds since epoch, while others may prefer ISO8601 strings.
@@ -330,6 +332,9 @@ type EncoderConfig struct {
// Unlike the other primitive type encoders, EncodeName is optional. The
// zero value falls back to FullNameEncoder.
EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"`
+ // Configure the encoder for interface{} type objects.
+ // If not provided, objects are encoded using json.Encoder.
+ NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"`
// Configures the field separator used by the console encoder. Defaults
// to tab.
ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"`
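
The two new EncoderConfig fields are opt-in: SkipLineEnding drops the per-entry terminator entirely, and NewReflectedEncoder swaps the serializer used for reflected (zap.Any / zap.Reflect) values. An empty LineEnding now defaults to DefaultLineEnding at encoder construction time, which is why the console and JSON encoders above no longer need their fallback branches. A minimal sketch, with imports of os, zap, and zapcore elided:

cfg := zap.NewProductionEncoderConfig()

// Drop the per-entry "\n" terminator entirely; useful when the transport
// (a message queue, a length-prefixed socket protocol, ...) adds its own framing.
cfg.SkipLineEnding = true

// Alternatively, keep a terminator but override it (ignored while
// SkipLineEnding is true):
// cfg.LineEnding = "\r\n"

core := zapcore.NewCore(zapcore.NewJSONEncoder(cfg), zapcore.AddSync(os.Stdout), zapcore.InfoLevel)
logger := zap.New(core)
logger.Info("no trailing newline on this entry")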
diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go
index 4aa8b4f9..0885505b 100644
--- a/vendor/go.uber.org/zap/zapcore/entry.go
+++ b/vendor/go.uber.org/zap/zapcore/entry.go
@@ -208,7 +208,7 @@ func (ce *CheckedEntry) Write(fields ...Field) {
// If the entry is dirty, log an internal error; because the
// CheckedEntry is being used after it was returned to the pool,
// the message may be an amalgamation from multiple call sites.
- fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", time.Now(), ce.Entry)
+ fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry)
ce.ErrorOutput.Sync()
}
return
@@ -219,11 +219,9 @@ func (ce *CheckedEntry) Write(fields ...Field) {
for i := range ce.cores {
err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields))
}
- if ce.ErrorOutput != nil {
- if err != nil {
- fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", time.Now(), err)
- ce.ErrorOutput.Sync()
- }
+ if err != nil && ce.ErrorOutput != nil {
+ fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
+ ce.ErrorOutput.Sync()
}
should, msg := ce.should, ce.Message
diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go
index f2a07d78..74919b0c 100644
--- a/vendor/go.uber.org/zap/zapcore/error.go
+++ b/vendor/go.uber.org/zap/zapcore/error.go
@@ -83,7 +83,7 @@ type errorGroup interface {
Errors() []error
}
-// Note that errArry and errArrayElem are very similar to the version
+// Note that errArray and errArrayElem are very similar to the version
// implemented in the top-level error.go file. We can't re-use this because
// that would require exporting errArray as part of the zapcore API.
diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go
index 5cf7d917..c5d751b8 100644
--- a/vendor/go.uber.org/zap/zapcore/json_encoder.go
+++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go
@@ -22,7 +22,6 @@ package zapcore
import (
"encoding/base64"
- "encoding/json"
"math"
"sync"
"time"
@@ -64,7 +63,7 @@ type jsonEncoder struct {
// for encoding generic values by reflection
reflectBuf *buffer.Buffer
- reflectEnc *json.Encoder
+ reflectEnc ReflectedEncoder
}
// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder
@@ -82,6 +81,17 @@ func NewJSONEncoder(cfg EncoderConfig) Encoder {
}
func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder {
+ if cfg.SkipLineEnding {
+ cfg.LineEnding = ""
+ } else if cfg.LineEnding == "" {
+ cfg.LineEnding = DefaultLineEnding
+ }
+
+ // If the user did not provide an EncoderConfig.NewReflectedEncoder, use the default.
+ if cfg.NewReflectedEncoder == nil {
+ cfg.NewReflectedEncoder = defaultReflectedEncoder
+ }
+
return &jsonEncoder{
EncoderConfig: &cfg,
buf: bufferpool.Get(),
@@ -118,6 +128,11 @@ func (enc *jsonEncoder) AddComplex128(key string, val complex128) {
enc.AppendComplex128(val)
}
+func (enc *jsonEncoder) AddComplex64(key string, val complex64) {
+ enc.addKey(key)
+ enc.AppendComplex64(val)
+}
+
func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
enc.addKey(key)
enc.AppendDuration(val)
@@ -128,6 +143,11 @@ func (enc *jsonEncoder) AddFloat64(key string, val float64) {
enc.AppendFloat64(val)
}
+func (enc *jsonEncoder) AddFloat32(key string, val float32) {
+ enc.addKey(key)
+ enc.AppendFloat32(val)
+}
+
func (enc *jsonEncoder) AddInt64(key string, val int64) {
enc.addKey(key)
enc.AppendInt64(val)
@@ -136,10 +156,7 @@ func (enc *jsonEncoder) AddInt64(key string, val int64) {
func (enc *jsonEncoder) resetReflectBuf() {
if enc.reflectBuf == nil {
enc.reflectBuf = bufferpool.Get()
- enc.reflectEnc = json.NewEncoder(enc.reflectBuf)
-
- // For consistency with our custom JSON encoder.
- enc.reflectEnc.SetEscapeHTML(false)
+ enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf)
} else {
enc.reflectBuf.Reset()
}
@@ -201,10 +218,16 @@ func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error {
}
func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error {
+ // Close ONLY new openNamespaces that are created during
+ // AppendObject().
+ old := enc.openNamespaces
+ enc.openNamespaces = 0
enc.addElementSeparator()
enc.buf.AppendByte('{')
err := obj.MarshalLogObject(enc)
enc.buf.AppendByte('}')
+ enc.closeOpenNamespaces()
+ enc.openNamespaces = old
return err
}
@@ -220,16 +243,23 @@ func (enc *jsonEncoder) AppendByteString(val []byte) {
enc.buf.AppendByte('"')
}
-func (enc *jsonEncoder) AppendComplex128(val complex128) {
+// appendComplex appends the encoded form of the provided complex128 value.
+// precision specifies the encoding precision for the real and imaginary
+// components of the complex number.
+func (enc *jsonEncoder) appendComplex(val complex128, precision int) {
enc.addElementSeparator()
// Cast to a platform-independent, fixed-size type.
r, i := float64(real(val)), float64(imag(val))
enc.buf.AppendByte('"')
// Because we're always in a quoted string, we can use strconv without
// special-casing NaN and +/-Inf.
- enc.buf.AppendFloat(r, 64)
- enc.buf.AppendByte('+')
- enc.buf.AppendFloat(i, 64)
+ enc.buf.AppendFloat(r, precision)
+ // If the imaginary part is negative, AppendFloat adds the minus (-)
+ // sign itself, so only prepend '+' for non-negative values.
+ if i >= 0 {
+ enc.buf.AppendByte('+')
+ }
+ enc.buf.AppendFloat(i, precision)
enc.buf.AppendByte('i')
enc.buf.AppendByte('"')
}
@@ -292,29 +322,28 @@ func (enc *jsonEncoder) AppendUint64(val uint64) {
enc.buf.AppendUint(val)
}
-func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) }
-func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) }
-func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) }
-func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
-func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
-func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) }
+func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) }
+func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
+func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
+func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) Clone() Encoder {
clone := enc.clone()
@@ -335,7 +364,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
final := enc.clone()
final.buf.AppendByte('{')
- if final.LevelKey != "" {
+ if final.LevelKey != "" && final.EncodeLevel != nil {
final.addKey(final.LevelKey)
cur := final.buf.Len()
final.EncodeLevel(ent.Level, final)
@@ -396,11 +425,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
final.AddString(final.StacktraceKey, ent.Stack)
}
final.buf.AppendByte('}')
- if final.LineEnding != "" {
- final.buf.AppendString(final.LineEnding)
- } else {
- final.buf.AppendString(DefaultLineEnding)
- }
+ final.buf.AppendString(final.LineEnding)
ret := final.buf
putJSONEncoder(final)
@@ -415,6 +440,7 @@ func (enc *jsonEncoder) closeOpenNamespaces() {
for i := 0; i < enc.openNamespaces; i++ {
enc.buf.AppendByte('}')
}
+ enc.openNamespaces = 0
}
func (enc *jsonEncoder) addKey(key string) {
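
Two behavioural changes in this file are easy to miss: complex values are now encoded at the precision of their source type (32 bits for complex64, 64 for complex128), and a negative imaginary part no longer picks up a stray '+'. A rough sketch of the resulting output; the field name and message are made up, and imports of fmt, time, zap, and zapcore are elided.

enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
enc.AddComplex128("z", 3-4i)

buf, err := enc.EncodeEntry(zapcore.Entry{
	Level:   zapcore.InfoLevel,
	Time:    time.Now(),
	Message: "impedance",
}, nil)
if err == nil {
	fmt.Print(buf.String())
	// {"level":"info","ts":...,"msg":"impedance","z":"3-4i"}
	// Before this change the value would have been rendered as "3+-4i".
	buf.Free()
}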
diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go
index e575c9f4..56e88dc0 100644
--- a/vendor/go.uber.org/zap/zapcore/level.go
+++ b/vendor/go.uber.org/zap/zapcore/level.go
@@ -55,6 +55,18 @@ const (
_maxLevel = FatalLevel
)
+// ParseLevel parses a level based on the lower-case or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid, an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
+func ParseLevel(text string) (Level, error) {
+ var level Level
+ err := level.UnmarshalText([]byte(text))
+ return level, err
+}
+
// String returns a lower-case ASCII representation of the log level.
func (l Level) String() string {
switch l {
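
ParseLevel is a thin wrapper over Level.UnmarshalText, so it accepts the same spellings ("debug", "INFO", "warn", ...). A typical use is reading the level from the environment; the variable name and fallback below are illustrative, with imports of os, zap, and zapcore elided.

lvl, err := zapcore.ParseLevel(os.Getenv("LOG_LEVEL")) // e.g. "debug", "INFO", "warn"
if err != nil {
	lvl = zapcore.InfoLevel // unrecognized input: fall back to info
}
core := zapcore.NewCore(
	zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
	zapcore.AddSync(os.Stderr),
	lvl,
)
logger := zap.New(core)
logger.Debug("only emitted when LOG_LEVEL allows it")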
diff --git a/vendor/go.uber.org/zap/zapcore/reflected_encoder.go b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
new file mode 100644
index 00000000..8746360e
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
@@ -0,0 +1,41 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// ReflectedEncoder serializes log fields that can't be serialized with Zap's
+// JSON encoder. These have the ReflectType field type.
+// Use EncoderConfig.NewReflectedEncoder to set this.
+type ReflectedEncoder interface {
+ // Encode encodes and writes to the underlying data stream.
+ Encode(interface{}) error
+}
+
+func defaultReflectedEncoder(w io.Writer) ReflectedEncoder {
+ enc := json.NewEncoder(w)
+ // For consistency with our custom JSON encoder.
+ enc.SetEscapeHTML(false)
+ return enc
+}
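
defaultReflectedEncoder reproduces the behaviour the JSON encoder previously hard-coded: encoding/json with HTML escaping disabled. A custom implementation only needs Encode, but when used with the JSON encoder its output is spliced into the log line as-is, so it should still emit valid JSON. A sketch using only the standard library, purely for illustration (imports of encoding/json, io, and zapcore elided):

func prettyReflectedEncoder(w io.Writer) zapcore.ReflectedEncoder {
	enc := json.NewEncoder(w)
	enc.SetEscapeHTML(false)
	enc.SetIndent("", "  ") // pretty-print zap.Any / zap.Reflect payloads
	return enc
}

// cfg.NewReflectedEncoder = prettyReflectedEncoder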
diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go
index 25f10ca1..8c116049 100644
--- a/vendor/go.uber.org/zap/zapcore/sampler.go
+++ b/vendor/go.uber.org/zap/zapcore/sampler.go
@@ -133,10 +133,21 @@ func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
// each tick. If more Entries with the same level and message are seen during
// the same interval, every Mth message is logged and the rest are dropped.
//
+// For example,
+//
+// core = NewSamplerWithOptions(core, time.Second, 10, 5)
+//
+// This will log the first 10 log entries with the same level and message
+// in a one second interval as-is. Following that, it will allow through
+// every 5th log entry with the same level and message in that interval.
+//
+// If thereafter is zero, the Core will drop all log entries after the first N
+// in that interval.
+//
// Sampler can be configured to report sampling decisions with the SamplerHook
// option.
//
-// Keep in mind that zap's sampling implementation is optimized for speed over
+// Keep in mind that Zap's sampling implementation is optimized for speed over
// absolute precision; under load, each tick may be slightly over- or
// under-sampled.
func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core {
@@ -197,12 +208,14 @@ func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
return ce
}
- counter := s.counts.get(ent.Level, ent.Message)
- n := counter.IncCheckReset(ent.Time, s.tick)
- if n > s.first && (n-s.first)%s.thereafter != 0 {
- s.hook(ent, LogDropped)
- return ce
+ if ent.Level >= _minLevel && ent.Level <= _maxLevel {
+ counter := s.counts.get(ent.Level, ent.Message)
+ n := counter.IncCheckReset(ent.Time, s.tick)
+ if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) {
+ s.hook(ent, LogDropped)
+ return ce
+ }
+ s.hook(ent, LogSampled)
}
- s.hook(ent, LogSampled)
return s.Core.Check(ent, ce)
}