| | | |
|---|---|---|
| author | Wim <wim@42.be> | 2017-02-18 23:00:46 +0100 |
| committer | Wim <wim@42.be> | 2017-02-18 23:11:48 +0100 |
| commit | 930b639cc9cd2d2873302f30303378c0e53816a8 (patch) | |
| tree | 8cd3f1d464fb5d4e5607fe16255c35a31a9d8b62 /vendor/github.com/valyala | |
| parent | 58483ea70c2c99a352592c5e50686fb03985650e (diff) | |
| download | matterbridge-msglm-930b639cc9cd2d2873302f30303378c0e53816a8.tar.gz matterbridge-msglm-930b639cc9cd2d2873302f30303378c0e53816a8.tar.bz2 matterbridge-msglm-930b639cc9cd2d2873302f30303378c0e53816a8.zip | |
Update vendor
Diffstat (limited to 'vendor/github.com/valyala')
10 files changed, 919 insertions, 0 deletions
```diff
diff --git a/vendor/github.com/valyala/bytebufferpool/LICENSE b/vendor/github.com/valyala/bytebufferpool/LICENSE
new file mode 100644
index 00000000..f7c935c2
--- /dev/null
+++ b/vendor/github.com/valyala/bytebufferpool/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Aliaksandr Valialkin, VertaMedia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/valyala/bytebufferpool/bytebuffer.go b/vendor/github.com/valyala/bytebufferpool/bytebuffer.go
new file mode 100644
index 00000000..07a055a2
--- /dev/null
+++ b/vendor/github.com/valyala/bytebufferpool/bytebuffer.go
@@ -0,0 +1,111 @@
+package bytebufferpool
+
+import "io"
+
+// ByteBuffer provides byte buffer, which can be used for minimizing
+// memory allocations.
+//
+// ByteBuffer may be used with functions appending data to the given []byte
+// slice. See example code for details.
+//
+// Use Get for obtaining an empty byte buffer.
+type ByteBuffer struct {
+
+    // B is a byte buffer to use in append-like workloads.
+    // See example code for details.
+    B []byte
+}
+
+// Len returns the size of the byte buffer.
+func (b *ByteBuffer) Len() int {
+    return len(b.B)
+}
+
+// ReadFrom implements io.ReaderFrom.
+//
+// The function appends all the data read from r to b.
+func (b *ByteBuffer) ReadFrom(r io.Reader) (int64, error) {
+    p := b.B
+    nStart := int64(len(p))
+    nMax := int64(cap(p))
+    n := nStart
+    if nMax == 0 {
+        nMax = 64
+        p = make([]byte, nMax)
+    } else {
+        p = p[:nMax]
+    }
+    for {
+        if n == nMax {
+            nMax *= 2
+            bNew := make([]byte, nMax)
+            copy(bNew, p)
+            p = bNew
+        }
+        nn, err := r.Read(p[n:])
+        n += int64(nn)
+        if err != nil {
+            b.B = p[:n]
+            n -= nStart
+            if err == io.EOF {
+                return n, nil
+            }
+            return n, err
+        }
+    }
+}
+
+// WriteTo implements io.WriterTo.
+func (b *ByteBuffer) WriteTo(w io.Writer) (int64, error) {
+    n, err := w.Write(b.B)
+    return int64(n), err
+}
+
+// Bytes returns b.B, i.e. all the bytes accumulated in the buffer.
+//
+// The purpose of this function is bytes.Buffer compatibility.
+func (b *ByteBuffer) Bytes() []byte {
+    return b.B
+}
+
+// Write implements io.Writer - it appends p to ByteBuffer.B
+func (b *ByteBuffer) Write(p []byte) (int, error) {
+    b.B = append(b.B, p...)
+    return len(p), nil
+}
+
+// WriteByte appends the byte c to the buffer.
+//
+// The purpose of this function is bytes.Buffer compatibility.
+//
+// The function always returns nil.
+func (b *ByteBuffer) WriteByte(c byte) error {
+    b.B = append(b.B, c)
+    return nil
+}
+
+// WriteString appends s to ByteBuffer.B.
+func (b *ByteBuffer) WriteString(s string) (int, error) {
+    b.B = append(b.B, s...)
+    return len(s), nil
+}
+
+// Set sets ByteBuffer.B to p.
+func (b *ByteBuffer) Set(p []byte) {
+    b.B = append(b.B[:0], p...)
+}
+
+// SetString sets ByteBuffer.B to s.
+func (b *ByteBuffer) SetString(s string) {
+    b.B = append(b.B[:0], s...)
+}
+
+// String returns string representation of ByteBuffer.B.
+func (b *ByteBuffer) String() string {
+    return string(b.B)
+}
+
+// Reset makes ByteBuffer.B empty.
+func (b *ByteBuffer) Reset() {
+    b.B = b.B[:0]
+}
```
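For orientation, a minimal sketch of how the ByteBuffer type added above is typically used as an append-friendly replacement for bytes.Buffer (illustrative example, not part of the vendored diff):

```go
package main

import (
    "fmt"
    "strings"

    "github.com/valyala/bytebufferpool"
)

func main() {
    var b bytebufferpool.ByteBuffer

    // ByteBuffer satisfies io.Writer, so it drops in wherever bytes.Buffer would.
    b.WriteString("hello, ")
    b.Write([]byte("world"))

    // ReadFrom appends everything from an io.Reader, growing B as needed.
    if _, err := b.ReadFrom(strings.NewReader("!")); err != nil {
        panic(err)
    }

    fmt.Println(b.String(), b.Len()) // "hello, world! 13"
    b.Reset()                        // length back to zero, capacity retained
}
```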
```diff
diff --git a/vendor/github.com/valyala/bytebufferpool/doc.go b/vendor/github.com/valyala/bytebufferpool/doc.go
new file mode 100644
index 00000000..e511b7c5
--- /dev/null
+++ b/vendor/github.com/valyala/bytebufferpool/doc.go
@@ -0,0 +1,7 @@
+// Package bytebufferpool implements a pool of byte buffers
+// with anti-fragmentation protection.
+//
+// The pool may waste limited amount of memory due to fragmentation.
+// This amount equals to the maximum total size of the byte buffers
+// in concurrent use.
+package bytebufferpool
diff --git a/vendor/github.com/valyala/bytebufferpool/pool.go b/vendor/github.com/valyala/bytebufferpool/pool.go
new file mode 100644
index 00000000..8bb4134d
--- /dev/null
+++ b/vendor/github.com/valyala/bytebufferpool/pool.go
@@ -0,0 +1,151 @@
+package bytebufferpool
+
+import (
+    "sort"
+    "sync"
+    "sync/atomic"
+)
+
+const (
+    minBitSize = 6 // 2**6=64 is a CPU cache line size
+    steps = 20
+
+    minSize = 1 << minBitSize
+    maxSize = 1 << (minBitSize + steps - 1)
+
+    calibrateCallsThreshold = 42000
+    maxPercentile = 0.95
+)
+
+// Pool represents byte buffer pool.
+//
+// Distinct pools may be used for distinct types of byte buffers.
+// Properly determined byte buffer types with their own pools may help reducing
+// memory waste.
+type Pool struct {
+    calls       [steps]uint64
+    calibrating uint64
+
+    defaultSize uint64
+    maxSize     uint64
+
+    pool sync.Pool
+}
+
+var defaultPool Pool
+
+// Get returns an empty byte buffer from the pool.
+//
+// Got byte buffer may be returned to the pool via Put call.
+// This reduces the number of memory allocations required for byte buffer
+// management.
+func Get() *ByteBuffer { return defaultPool.Get() }
+
+// Get returns new byte buffer with zero length.
+//
+// The byte buffer may be returned to the pool via Put after the use
+// in order to minimize GC overhead.
+func (p *Pool) Get() *ByteBuffer {
+    v := p.pool.Get()
+    if v != nil {
+        return v.(*ByteBuffer)
+    }
+    return &ByteBuffer{
+        B: make([]byte, 0, atomic.LoadUint64(&p.defaultSize)),
+    }
+}
+
+// Put returns byte buffer to the pool.
+//
+// ByteBuffer.B mustn't be touched after returning it to the pool.
+// Otherwise data races will occur.
+func Put(b *ByteBuffer) { defaultPool.Put(b) }
+
+// Put releases byte buffer obtained via Get to the pool.
+//
+// The buffer mustn't be accessed after returning to the pool.
+func (p *Pool) Put(b *ByteBuffer) {
+    idx := index(len(b.B))
+
+    if atomic.AddUint64(&p.calls[idx], 1) > calibrateCallsThreshold {
+        p.calibrate()
+    }
+
+    maxSize := int(atomic.LoadUint64(&p.maxSize))
+    if maxSize == 0 || cap(b.B) <= maxSize {
+        b.Reset()
+        p.pool.Put(b)
+    }
+}
+
+func (p *Pool) calibrate() {
+    if !atomic.CompareAndSwapUint64(&p.calibrating, 0, 1) {
+        return
+    }
+
+    a := make(callSizes, 0, steps)
+    var callsSum uint64
+    for i := uint64(0); i < steps; i++ {
+        calls := atomic.SwapUint64(&p.calls[i], 0)
+        callsSum += calls
+        a = append(a, callSize{
+            calls: calls,
+            size:  minSize << i,
+        })
+    }
+    sort.Sort(a)
+
+    defaultSize := a[0].size
+    maxSize := defaultSize
+
+    maxSum := uint64(float64(callsSum) * maxPercentile)
+    callsSum = 0
+    for i := 0; i < steps; i++ {
+        if callsSum > maxSum {
+            break
+        }
+        callsSum += a[i].calls
+        size := a[i].size
+        if size > maxSize {
+            maxSize = size
+        }
+    }
+
+    atomic.StoreUint64(&p.defaultSize, defaultSize)
+    atomic.StoreUint64(&p.maxSize, maxSize)
+
+    atomic.StoreUint64(&p.calibrating, 0)
+}
+
+type callSize struct {
+    calls uint64
+    size  uint64
+}
+
+type callSizes []callSize
+
+func (ci callSizes) Len() int {
+    return len(ci)
+}
+
+func (ci callSizes) Less(i, j int) bool {
+    return ci[i].calls > ci[j].calls
+}
+
+func (ci callSizes) Swap(i, j int) {
+    ci[i], ci[j] = ci[j], ci[i]
+}
+
+func index(n int) int {
+    n--
+    n >>= minBitSize
+    idx := 0
+    for n > 0 {
+        n >>= 1
+        idx++
+    }
+    if idx >= steps {
+        idx = steps - 1
+    }
+    return idx
+}
```
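The pool above sizes buffers adaptively: Put buckets each returned buffer by length into one of 20 power-of-two size classes, and once a class has been seen more than calibrateCallsThreshold times, calibrate picks the most frequent class as the default allocation size and drops buffers larger than the size class covering roughly the 95th percentile of calls. A minimal sketch of the intended Get/Put pattern (illustrative only; the render helper is hypothetical):

```go
package main

import (
    "fmt"

    "github.com/valyala/bytebufferpool"
)

// render builds a message using a pooled buffer instead of allocating a new one.
func render(user, text string) string {
    b := bytebufferpool.Get() // empty buffer, capacity pre-sized from pool statistics
    defer bytebufferpool.Put(b)

    b.WriteString(user)
    b.WriteString(": ")
    b.WriteString(text)
    return b.String() // String copies, so the result stays valid after Put
}

func main() {
    fmt.Println(render("wim", "Update vendor"))
}
```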
```diff
diff --git a/vendor/github.com/valyala/fasttemplate/LICENSE b/vendor/github.com/valyala/fasttemplate/LICENSE
new file mode 100644
index 00000000..7125a63c
--- /dev/null
+++ b/vendor/github.com/valyala/fasttemplate/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Aliaksandr Valialkin
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/valyala/fasttemplate/template.go b/vendor/github.com/valyala/fasttemplate/template.go
new file mode 100644
index 00000000..91209201
--- /dev/null
+++ b/vendor/github.com/valyala/fasttemplate/template.go
@@ -0,0 +1,317 @@
+// Package fasttemplate implements simple and fast template library.
+//
+// Fasttemplate is faster than text/template, strings.Replace
+// and strings.Replacer.
+//
+// Fasttemplate ideally fits for fast and simple placeholders' substitutions.
+package fasttemplate
+
+import (
+    "bytes"
+    "fmt"
+    "github.com/valyala/bytebufferpool"
+    "io"
+)
+
+// ExecuteFunc calls f on each template tag (placeholder) occurrence.
+//
+// Returns the number of bytes written to w.
+//
+// This function is optimized for constantly changing templates.
+// Use Template.ExecuteFunc for frozen templates.
+func ExecuteFunc(template, startTag, endTag string, w io.Writer, f TagFunc) (int64, error) {
+    s := unsafeString2Bytes(template)
+    a := unsafeString2Bytes(startTag)
+    b := unsafeString2Bytes(endTag)
+
+    var nn int64
+    var ni int
+    var err error
+    for {
+        n := bytes.Index(s, a)
+        if n < 0 {
+            break
+        }
+        ni, err = w.Write(s[:n])
+        nn += int64(ni)
+        if err != nil {
+            return nn, err
+        }
+
+        s = s[n+len(a):]
+        n = bytes.Index(s, b)
+        if n < 0 {
+            // cannot find end tag - just write it to the output.
+            ni, _ = w.Write(a)
+            nn += int64(ni)
+            break
+        }
+
+        ni, err = f(w, unsafeBytes2String(s[:n]))
+        nn += int64(ni)
+        s = s[n+len(b):]
+    }
+    ni, err = w.Write(s)
+    nn += int64(ni)
+
+    return nn, err
+}
+
+// Execute substitutes template tags (placeholders) with the corresponding
+// values from the map m and writes the result to the given writer w.
+//
+// Substitution map m may contain values with the following types:
+// * []byte - the fastest value type
+// * string - convenient value type
+// * TagFunc - flexible value type
+//
+// Returns the number of bytes written to w.
+//
+// This function is optimized for constantly changing templates.
+// Use Template.Execute for frozen templates.
+func Execute(template, startTag, endTag string, w io.Writer, m map[string]interface{}) (int64, error) {
+    return ExecuteFunc(template, startTag, endTag, w, func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) })
+}
+
+// ExecuteFuncString calls f on each template tag (placeholder) occurrence
+// and substitutes it with the data written to TagFunc's w.
+//
+// Returns the resulting string.
+//
+// This function is optimized for constantly changing templates.
+// Use Template.ExecuteFuncString for frozen templates.
+func ExecuteFuncString(template, startTag, endTag string, f TagFunc) string {
+    tagsCount := bytes.Count(unsafeString2Bytes(template), unsafeString2Bytes(startTag))
+    if tagsCount == 0 {
+        return template
+    }
+
+    bb := byteBufferPool.Get()
+    if _, err := ExecuteFunc(template, startTag, endTag, bb, f); err != nil {
+        panic(fmt.Sprintf("unexpected error: %s", err))
+    }
+    s := string(bb.B)
+    bb.Reset()
+    byteBufferPool.Put(bb)
+    return s
+}
+
+var byteBufferPool bytebufferpool.Pool
+
+// ExecuteString substitutes template tags (placeholders) with the corresponding
+// values from the map m and returns the result.
+//
+// Substitution map m may contain values with the following types:
+// * []byte - the fastest value type
+// * string - convenient value type
+// * TagFunc - flexible value type
+//
+// This function is optimized for constantly changing templates.
+// Use Template.ExecuteString for frozen templates.
+func ExecuteString(template, startTag, endTag string, m map[string]interface{}) string {
+    return ExecuteFuncString(template, startTag, endTag, func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) })
+}
+
+// Template implements simple template engine, which can be used for fast
+// tags' (aka placeholders) substitution.
+type Template struct {
+    template string
+    startTag string
+    endTag   string
+
+    texts          [][]byte
+    tags           []string
+    byteBufferPool bytebufferpool.Pool
+}
+
+// New parses the given template using the given startTag and endTag
+// as tag start and tag end.
+//
+// The returned template can be executed by concurrently running goroutines
+// using Execute* methods.
+//
+// New panics if the given template cannot be parsed. Use NewTemplate instead
+// if template may contain errors.
+func New(template, startTag, endTag string) *Template {
+    t, err := NewTemplate(template, startTag, endTag)
+    if err != nil {
+        panic(err)
+    }
+    return t
+}
+
+// NewTemplate parses the given template using the given startTag and endTag
+// as tag start and tag end.
+//
+// The returned template can be executed by concurrently running goroutines
+// using Execute* methods.
+func NewTemplate(template, startTag, endTag string) (*Template, error) {
+    var t Template
+    err := t.Reset(template, startTag, endTag)
+    if err != nil {
+        return nil, err
+    }
+    return &t, nil
+}
+
+// TagFunc can be used as a substitution value in the map passed to Execute*.
+// Execute* functions pass tag (placeholder) name in 'tag' argument.
+//
+// TagFunc must be safe to call from concurrently running goroutines.
+//
+// TagFunc must write contents to w and return the number of bytes written.
+type TagFunc func(w io.Writer, tag string) (int, error)
+
+// Reset resets the template t to new one defined by
+// template, startTag and endTag.
+//
+// Reset allows Template object re-use.
+//
+// Reset may be called only if no other goroutines call t methods at the moment.
+func (t *Template) Reset(template, startTag, endTag string) error {
+    // Keep these vars in t, so GC won't collect them and won't break
+    // vars derived via unsafe*
+    t.template = template
+    t.startTag = startTag
+    t.endTag = endTag
+    t.texts = t.texts[:0]
+    t.tags = t.tags[:0]
+
+    if len(startTag) == 0 {
+        panic("startTag cannot be empty")
+    }
+    if len(endTag) == 0 {
+        panic("endTag cannot be empty")
+    }
+
+    s := unsafeString2Bytes(template)
+    a := unsafeString2Bytes(startTag)
+    b := unsafeString2Bytes(endTag)
+
+    tagsCount := bytes.Count(s, a)
+    if tagsCount == 0 {
+        return nil
+    }
+
+    if tagsCount+1 > cap(t.texts) {
+        t.texts = make([][]byte, 0, tagsCount+1)
+    }
+    if tagsCount > cap(t.tags) {
+        t.tags = make([]string, 0, tagsCount)
+    }
+
+    for {
+        n := bytes.Index(s, a)
+        if n < 0 {
+            t.texts = append(t.texts, s)
+            break
+        }
+        t.texts = append(t.texts, s[:n])
+
+        s = s[n+len(a):]
+        n = bytes.Index(s, b)
+        if n < 0 {
+            return fmt.Errorf("Cannot find end tag=%q in the template=%q starting from %q", endTag, template, s)
+        }
+
+        t.tags = append(t.tags, unsafeBytes2String(s[:n]))
+        s = s[n+len(b):]
+    }
+
+    return nil
+}
+
+// ExecuteFunc calls f on each template tag (placeholder) occurrence.
+//
+// Returns the number of bytes written to w.
+//
+// This function is optimized for frozen templates.
+// Use ExecuteFunc for constantly changing templates.
+func (t *Template) ExecuteFunc(w io.Writer, f TagFunc) (int64, error) {
+    var nn int64
+
+    n := len(t.texts) - 1
+    if n == -1 {
+        ni, err := w.Write(unsafeString2Bytes(t.template))
+        return int64(ni), err
+    }
+
+    for i := 0; i < n; i++ {
+        ni, err := w.Write(t.texts[i])
+        nn += int64(ni)
+        if err != nil {
+            return nn, err
+        }
+
+        ni, err = f(w, t.tags[i])
+        nn += int64(ni)
+        if err != nil {
+            return nn, err
+        }
+    }
+    ni, err := w.Write(t.texts[n])
+    nn += int64(ni)
+    return nn, err
+}
+
+// Execute substitutes template tags (placeholders) with the corresponding
+// values from the map m and writes the result to the given writer w.
+//
+// Substitution map m may contain values with the following types:
+// * []byte - the fastest value type
+// * string - convenient value type
+// * TagFunc - flexible value type
+//
+// Returns the number of bytes written to w.
+func (t *Template) Execute(w io.Writer, m map[string]interface{}) (int64, error) {
+    return t.ExecuteFunc(w, func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) })
+}
+
+// ExecuteFuncString calls f on each template tag (placeholder) occurrence
+// and substitutes it with the data written to TagFunc's w.
+//
+// Returns the resulting string.
+//
+// This function is optimized for frozen templates.
+// Use ExecuteFuncString for constantly changing templates.
+func (t *Template) ExecuteFuncString(f TagFunc) string {
+    bb := t.byteBufferPool.Get()
+    if _, err := t.ExecuteFunc(bb, f); err != nil {
+        panic(fmt.Sprintf("unexpected error: %s", err))
+    }
+    s := string(bb.Bytes())
+    bb.Reset()
+    t.byteBufferPool.Put(bb)
+    return s
+}
+
+// ExecuteString substitutes template tags (placeholders) with the corresponding
+// values from the map m and returns the result.
+//
+// Substitution map m may contain values with the following types:
+// * []byte - the fastest value type
+// * string - convenient value type
+// * TagFunc - flexible value type
+//
+// This function is optimized for frozen templates.
+// Use ExecuteString for constantly changing templates.
+func (t *Template) ExecuteString(m map[string]interface{}) string {
+    return t.ExecuteFuncString(func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) })
+}
+
+func stdTagFunc(w io.Writer, tag string, m map[string]interface{}) (int, error) {
+    v := m[tag]
+    if v == nil {
+        return 0, nil
+    }
+    switch value := v.(type) {
+    case []byte:
+        return w.Write(value)
+    case string:
+        return w.Write([]byte(value))
+    case TagFunc:
+        return value(w, tag)
+    default:
+        panic(fmt.Sprintf("tag=%q contains unexpected value type=%#v. Expected []byte, string or TagFunc", tag, v))
+    }
+}
```
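For orientation, a short sketch of how the fasttemplate API introduced above is typically driven, covering the one-shot ExecuteString helper and a reusable Template with a TagFunc for unknown tags (illustrative example only, not code taken from matterbridge):

```go
package main

import (
    "fmt"
    "io"

    "github.com/valyala/fasttemplate"
)

func main() {
    // One-shot substitution: convenient for templates that change on every call.
    s := fasttemplate.ExecuteString("Hello, {{user}}! You are {{days}} days old.",
        "{{", "}}", map[string]interface{}{
            "user": "wim",
            "days": []byte("42"),
        })
    fmt.Println(s) // Hello, wim! You are 42 days old.

    // Frozen template: parse once with New, then execute many times.
    t := fasttemplate.New("{{channel}}: {{msg}}", "{{", "}}")
    out := t.ExecuteFuncString(func(w io.Writer, tag string) (int, error) {
        switch tag {
        case "channel":
            return w.Write([]byte("#general"))
        default:
            // Unknown tags can be handled explicitly instead of being dropped.
            return w.Write([]byte("<" + tag + ">"))
        }
    })
    fmt.Println(out) // #general: <msg>
}
```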
```diff
diff --git a/vendor/github.com/valyala/fasttemplate/unsafe.go b/vendor/github.com/valyala/fasttemplate/unsafe.go
new file mode 100644
index 00000000..5e25b0ff
--- /dev/null
+++ b/vendor/github.com/valyala/fasttemplate/unsafe.go
@@ -0,0 +1,20 @@
+package fasttemplate
+
+import (
+    "reflect"
+    "unsafe"
+)
+
+func unsafeBytes2String(b []byte) string {
+    return *(*string)(unsafe.Pointer(&b))
+}
+
+func unsafeString2Bytes(s string) []byte {
+    sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
+    bh := reflect.SliceHeader{
+        Data: sh.Data,
+        Len:  sh.Len,
+        Cap:  sh.Len,
+    }
+    return *(*[]byte)(unsafe.Pointer(&bh))
+}
```
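The two helpers above convert between string and []byte without copying by rebuilding the underlying slice and string headers, which is how fasttemplate scans tags without extra allocations. A brief hedged illustration of the trade-off (the ordinary conversions shown here do copy, which is exactly what unsafe.go avoids; the zero-copy view must be treated as read-only):

```go
package main

import "fmt"

func main() {
    tpl := "Hello, {{user}}!"

    // The standard conversions each allocate and copy the data.
    asBytes := []byte(tpl)       // copy of tpl's bytes
    roundTrip := string(asBytes) // another copy

    // unsafeString2Bytes/unsafeBytes2String skip those copies by pointing a
    // new slice/string header at the same memory. The cost is a contract:
    // a []byte view of a string aliases immutable memory and must never be
    // written to, which is why fasttemplate only reads from it.
    fmt.Println(len(asBytes), roundTrip)
}
```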
```diff
diff --git a/vendor/github.com/valyala/fasttemplate/vendor/github.com/valyala/bytebufferpool/bytebuffer.go b/vendor/github.com/valyala/fasttemplate/vendor/github.com/valyala/bytebufferpool/bytebuffer.go
new file mode 100644
index 00000000..07a055a2
--- /dev/null
+++ b/vendor/github.com/valyala/fasttemplate/vendor/github.com/valyala/bytebufferpool/bytebuffer.go
@@ -0,0 +1,111 @@
+package bytebufferpool
+
+import "io"
+
+// ByteBuffer provides byte buffer, which can be used for minimizing
+// memory allocations.
+//
+// ByteBuffer may be used with functions appending data to the given []byte
+// slice. See example code for details.
+//
+// Use Get for obtaining an empty byte buffer.
+type ByteBuffer struct {
+
+    // B is a byte buffer to use in append-like workloads.
+    // See example code for details.
+    B []byte
+}
+
+// Len returns the size of the byte buffer.
+func (b *ByteBuffer) Len() int {
+    return len(b.B)
+}
+
+// ReadFrom implements io.ReaderFrom.
+//
+// The function appends all the data read from r to b.
+func (b *ByteBuffer) ReadFrom(r io.Reader) (int64, error) {
+    p := b.B
+    nStart := int64(len(p))
+    nMax := int64(cap(p))
+    n := nStart
+    if nMax == 0 {
+        nMax = 64
+        p = make([]byte, nMax)
+    } else {
+        p = p[:nMax]
+    }
+    for {
+        if n == nMax {
+            nMax *= 2
+            bNew := make([]byte, nMax)
+            copy(bNew, p)
+            p = bNew
+        }
+        nn, err := r.Read(p[n:])
+        n += int64(nn)
+        if err != nil {
+            b.B = p[:n]
+            n -= nStart
+            if err == io.EOF {
+                return n, nil
+            }
+            return n, err
+        }
+    }
+}
+
+// WriteTo implements io.WriterTo.
+func (b *ByteBuffer) WriteTo(w io.Writer) (int64, error) {
+    n, err := w.Write(b.B)
+    return int64(n), err
+}
+
+// Bytes returns b.B, i.e. all the bytes accumulated in the buffer.
+//
+// The purpose of this function is bytes.Buffer compatibility.
+func (b *ByteBuffer) Bytes() []byte {
+    return b.B
+}
+
+// Write implements io.Writer - it appends p to ByteBuffer.B
+func (b *ByteBuffer) Write(p []byte) (int, error) {
+    b.B = append(b.B, p...)
+    return len(p), nil
+}
+
+// WriteByte appends the byte c to the buffer.
+//
+// The purpose of this function is bytes.Buffer compatibility.
+//
+// The function always returns nil.
+func (b *ByteBuffer) WriteByte(c byte) error {
+    b.B = append(b.B, c)
+    return nil
+}
+
+// WriteString appends s to ByteBuffer.B.
+func (b *ByteBuffer) WriteString(s string) (int, error) {
+    b.B = append(b.B, s...)
+    return len(s), nil
+}
+
+// Set sets ByteBuffer.B to p.
+func (b *ByteBuffer) Set(p []byte) {
+    b.B = append(b.B[:0], p...)
+}
+
+// SetString sets ByteBuffer.B to s.
+func (b *ByteBuffer) SetString(s string) {
+    b.B = append(b.B[:0], s...)
+}
+
+// String returns string representation of ByteBuffer.B.
+func (b *ByteBuffer) String() string {
+    return string(b.B)
+}
+
+// Reset makes ByteBuffer.B empty.
+func (b *ByteBuffer) Reset() {
+    b.B = b.B[:0]
+}
diff --git a/vendor/github.com/valyala/fasttemplate/vendor/github.com/valyala/bytebufferpool/doc.go b/vendor/github.com/valyala/fasttemplate/vendor/github.com/valyala/bytebufferpool/doc.go
new file mode 100644
index 00000000..e511b7c5
--- /dev/null
+++ b/vendor/github.com/valyala/fasttemplate/vendor/github.com/valyala/bytebufferpool/doc.go
@@ -0,0 +1,7 @@
+// Package bytebufferpool implements a pool of byte buffers
+// with anti-fragmentation protection.
+//
+// The pool may waste limited amount of memory due to fragmentation.
+// This amount equals to the maximum total size of the byte buffers
+// in concurrent use.
+package bytebufferpool
diff --git a/vendor/github.com/valyala/fasttemplate/vendor/github.com/valyala/bytebufferpool/pool.go b/vendor/github.com/valyala/fasttemplate/vendor/github.com/valyala/bytebufferpool/pool.go
new file mode 100644
index 00000000..8bb4134d
--- /dev/null
+++ b/vendor/github.com/valyala/fasttemplate/vendor/github.com/valyala/bytebufferpool/pool.go
@@ -0,0 +1,151 @@
+package bytebufferpool
+
+import (
+    "sort"
+    "sync"
+    "sync/atomic"
+)
+
+const (
+    minBitSize = 6 // 2**6=64 is a CPU cache line size
+    steps = 20
+
+    minSize = 1 << minBitSize
+    maxSize = 1 << (minBitSize + steps - 1)
+
+    calibrateCallsThreshold = 42000
+    maxPercentile = 0.95
+)
+
+// Pool represents byte buffer pool.
+//
+// Distinct pools may be used for distinct types of byte buffers.
+// Properly determined byte buffer types with their own pools may help reducing
+// memory waste.
+type Pool struct {
+    calls       [steps]uint64
+    calibrating uint64
+
+    defaultSize uint64
+    maxSize     uint64
+
+    pool sync.Pool
+}
+
+var defaultPool Pool
+
+// Get returns an empty byte buffer from the pool.
+//
+// Got byte buffer may be returned to the pool via Put call.
+// This reduces the number of memory allocations required for byte buffer
+// management.
+func Get() *ByteBuffer { return defaultPool.Get() }
+
+// Get returns new byte buffer with zero length.
+//
+// The byte buffer may be returned to the pool via Put after the use
+// in order to minimize GC overhead.
+func (p *Pool) Get() *ByteBuffer {
+    v := p.pool.Get()
+    if v != nil {
+        return v.(*ByteBuffer)
+    }
+    return &ByteBuffer{
+        B: make([]byte, 0, atomic.LoadUint64(&p.defaultSize)),
+    }
+}
+
+// Put returns byte buffer to the pool.
+//
+// ByteBuffer.B mustn't be touched after returning it to the pool.
+// Otherwise data races will occur.
+func Put(b *ByteBuffer) { defaultPool.Put(b) }
+
+// Put releases byte buffer obtained via Get to the pool.
+//
+// The buffer mustn't be accessed after returning to the pool.
+func (p *Pool) Put(b *ByteBuffer) {
+    idx := index(len(b.B))
+
+    if atomic.AddUint64(&p.calls[idx], 1) > calibrateCallsThreshold {
+        p.calibrate()
+    }
+
+    maxSize := int(atomic.LoadUint64(&p.maxSize))
+    if maxSize == 0 || cap(b.B) <= maxSize {
+        b.Reset()
+        p.pool.Put(b)
+    }
+}
+
+func (p *Pool) calibrate() {
+    if !atomic.CompareAndSwapUint64(&p.calibrating, 0, 1) {
+        return
+    }
+
+    a := make(callSizes, 0, steps)
+    var callsSum uint64
+    for i := uint64(0); i < steps; i++ {
+        calls := atomic.SwapUint64(&p.calls[i], 0)
+        callsSum += calls
+        a = append(a, callSize{
+            calls: calls,
+            size:  minSize << i,
+        })
+    }
+    sort.Sort(a)
+
+    defaultSize := a[0].size
+    maxSize := defaultSize
+
+    maxSum := uint64(float64(callsSum) * maxPercentile)
+    callsSum = 0
+    for i := 0; i < steps; i++ {
+        if callsSum > maxSum {
+            break
+        }
+        callsSum += a[i].calls
+        size := a[i].size
+        if size > maxSize {
+            maxSize = size
+        }
+    }
+
+    atomic.StoreUint64(&p.defaultSize, defaultSize)
+    atomic.StoreUint64(&p.maxSize, maxSize)
+
+    atomic.StoreUint64(&p.calibrating, 0)
+}
+
+type callSize struct {
+    calls uint64
+    size  uint64
+}
+
+type callSizes []callSize
+
+func (ci callSizes) Len() int {
+    return len(ci)
+}
+
+func (ci callSizes) Less(i, j int) bool {
+    return ci[i].calls > ci[j].calls
+}
+
+func (ci callSizes) Swap(i, j int) {
+    ci[i], ci[j] = ci[j], ci[i]
+}
+
+func index(n int) int {
+    n--
+    n >>= minBitSize
+    idx := 0
+    for n > 0 {
+        n >>= 1
+        idx++
+    }
+    if idx >= steps {
+        idx = steps - 1
+    }
+    return idx
+}
```