Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/BurntSushi/toml/COPYING                        |   14
-rw-r--r--  vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go  |   90
-rw-r--r--  vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go  |  131
-rw-r--r--  vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go              |   61
-rw-r--r--  vendor/github.com/BurntSushi/toml/decode.go                      |  509
-rw-r--r--  vendor/github.com/BurntSushi/toml/decode_meta.go                 |  121
-rw-r--r--  vendor/github.com/BurntSushi/toml/doc.go                         |   27
-rw-r--r--  vendor/github.com/BurntSushi/toml/encode.go                      |  568
-rw-r--r--  vendor/github.com/BurntSushi/toml/encoding_types.go              |   19
-rw-r--r--  vendor/github.com/BurntSushi/toml/encoding_types_1.1.go          |   18
-rw-r--r--  vendor/github.com/BurntSushi/toml/lex.go                         |  858
-rw-r--r--  vendor/github.com/BurntSushi/toml/parse.go                       |  557
-rw-r--r--  vendor/github.com/BurntSushi/toml/type_check.go                  |   91
-rw-r--r--  vendor/github.com/BurntSushi/toml/type_fields.go                 |  242
-rw-r--r--  vendor/gopkg.in/gcfg.v1/LICENSE                                  |   28
-rw-r--r--  vendor/gopkg.in/gcfg.v1/doc.go                                   |  118
-rw-r--r--  vendor/gopkg.in/gcfg.v1/go1_0.go                                 |    7
-rw-r--r--  vendor/gopkg.in/gcfg.v1/go1_2.go                                 |    9
-rw-r--r--  vendor/gopkg.in/gcfg.v1/read.go                                  |  188
-rw-r--r--  vendor/gopkg.in/gcfg.v1/scanner/errors.go                        |  121
-rw-r--r--  vendor/gopkg.in/gcfg.v1/scanner/scanner.go                       |  342
-rw-r--r--  vendor/gopkg.in/gcfg.v1/set.go                                   |  293
-rw-r--r--  vendor/gopkg.in/gcfg.v1/token/position.go                        |  435
-rw-r--r--  vendor/gopkg.in/gcfg.v1/token/serialize.go                       |   56
-rw-r--r--  vendor/gopkg.in/gcfg.v1/token/token.go                           |   83
-rw-r--r--  vendor/gopkg.in/gcfg.v1/types/bool.go                            |   23
-rw-r--r--  vendor/gopkg.in/gcfg.v1/types/doc.go                             |    4
-rw-r--r--  vendor/gopkg.in/gcfg.v1/types/enum.go                            |   44
-rw-r--r--  vendor/gopkg.in/gcfg.v1/types/int.go                             |   86
-rw-r--r--  vendor/gopkg.in/gcfg.v1/types/scan.go                            |   23
-rw-r--r--  vendor/manifest                                                  |   16
31 files changed, 3314 insertions(+), 1868 deletions(-)
diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING
new file mode 100644
index 00000000..5a8e3325
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/COPYING
@@ -0,0 +1,14 @@
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ Version 2, December 2004
+
+ Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
+
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. You just DO WHAT THE FUCK YOU WANT TO.
+
diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go
new file mode 100644
index 00000000..14e75570
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go
@@ -0,0 +1,90 @@
+// Command toml-test-decoder satisfies the toml-test interface for testing
+// TOML decoders. Namely, it accepts TOML on stdin and outputs JSON on stdout.
+package main
+
+import (
+ "encoding/json"
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "path"
+ "time"
+
+ "github.com/BurntSushi/toml"
+)
+
+func init() {
+ log.SetFlags(0)
+
+ flag.Usage = usage
+ flag.Parse()
+}
+
+func usage() {
+ log.Printf("Usage: %s < toml-file\n", path.Base(os.Args[0]))
+ flag.PrintDefaults()
+
+ os.Exit(1)
+}
+
+func main() {
+ if flag.NArg() != 0 {
+ flag.Usage()
+ }
+
+ var tmp interface{}
+ if _, err := toml.DecodeReader(os.Stdin, &tmp); err != nil {
+ log.Fatalf("Error decoding TOML: %s", err)
+ }
+
+ typedTmp := translate(tmp)
+ if err := json.NewEncoder(os.Stdout).Encode(typedTmp); err != nil {
+ log.Fatalf("Error encoding JSON: %s", err)
+ }
+}
+
+func translate(tomlData interface{}) interface{} {
+ switch orig := tomlData.(type) {
+ case map[string]interface{}:
+ typed := make(map[string]interface{}, len(orig))
+ for k, v := range orig {
+ typed[k] = translate(v)
+ }
+ return typed
+ case []map[string]interface{}:
+ typed := make([]map[string]interface{}, len(orig))
+ for i, v := range orig {
+ typed[i] = translate(v).(map[string]interface{})
+ }
+ return typed
+ case []interface{}:
+ typed := make([]interface{}, len(orig))
+ for i, v := range orig {
+ typed[i] = translate(v)
+ }
+
+ // We don't really need to tag arrays, but let's be future proof.
+ // (If TOML ever supports tuples, we'll need this.)
+ return tag("array", typed)
+ case time.Time:
+ return tag("datetime", orig.Format("2006-01-02T15:04:05Z"))
+ case bool:
+ return tag("bool", fmt.Sprintf("%v", orig))
+ case int64:
+ return tag("integer", fmt.Sprintf("%d", orig))
+ case float64:
+ return tag("float", fmt.Sprintf("%v", orig))
+ case string:
+ return tag("string", orig)
+ }
+
+ panic(fmt.Sprintf("Unknown type: %T", tomlData))
+}
+
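+// tag wraps a stringified value in the {"type": ..., "value": ...} object
+// shape that the toml-test harness expects. For illustration (not part of
+// the code): the TOML document `a = true` is emitted as the JSON
+// `{"a": {"type": "bool", "value": "true"}}`.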
+func tag(typeName string, data interface{}) map[string]interface{} {
+ return map[string]interface{}{
+ "type": typeName,
+ "value": data,
+ }
+}
diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go
new file mode 100644
index 00000000..092cc684
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go
@@ -0,0 +1,131 @@
+// Command toml-test-encoder satisfies the toml-test interface for testing
+// TOML encoders. Namely, it accepts JSON on stdin and outputs TOML on stdout.
+package main
+
+import (
+ "encoding/json"
+ "flag"
+ "log"
+ "os"
+ "path"
+ "strconv"
+ "time"
+
+ "github.com/BurntSushi/toml"
+)
+
+func init() {
+ log.SetFlags(0)
+
+ flag.Usage = usage
+ flag.Parse()
+}
+
+func usage() {
+ log.Printf("Usage: %s < json-file\n", path.Base(os.Args[0]))
+ flag.PrintDefaults()
+
+ os.Exit(1)
+}
+
+func main() {
+ if flag.NArg() != 0 {
+ flag.Usage()
+ }
+
+ var tmp interface{}
+ if err := json.NewDecoder(os.Stdin).Decode(&tmp); err != nil {
+ log.Fatalf("Error decoding JSON: %s", err)
+ }
+
+ tomlData := translate(tmp)
+ if err := toml.NewEncoder(os.Stdout).Encode(tomlData); err != nil {
+ log.Fatalf("Error encoding TOML: %s", err)
+ }
+}
+
+func translate(typedJson interface{}) interface{} {
+ switch v := typedJson.(type) {
+ case map[string]interface{}:
+ if len(v) == 2 && in("type", v) && in("value", v) {
+ return untag(v)
+ }
+ m := make(map[string]interface{}, len(v))
+ for k, v2 := range v {
+ m[k] = translate(v2)
+ }
+ return m
+ case []interface{}:
+ tabArray := make([]map[string]interface{}, len(v))
+ for i := range v {
+ if m, ok := translate(v[i]).(map[string]interface{}); ok {
+ tabArray[i] = m
+ } else {
+ log.Fatalf("JSON arrays may only contain objects. This " +
+ "corresponds to only tables being allowed in " +
+ "TOML table arrays.")
+ }
+ }
+ return tabArray
+ }
+ log.Fatalf("Unrecognized JSON format '%T'.", typedJson)
+ panic("unreachable")
+}
+
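+// untag converts a tagged {"type": ..., "value": ...} object back into a
+// plain Go value. For illustration (not part of the code): the tagged JSON
+// `{"type": "integer", "value": "42"}` untags to the Go int 42.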
+func untag(typed map[string]interface{}) interface{} {
+ t := typed["type"].(string)
+ v := typed["value"]
+ switch t {
+ case "string":
+ return v.(string)
+ case "integer":
+ v := v.(string)
+ n, err := strconv.Atoi(v)
+ if err != nil {
+ log.Fatalf("Could not parse '%s' as integer: %s", v, err)
+ }
+ return n
+ case "float":
+ v := v.(string)
+ f, err := strconv.ParseFloat(v, 64)
+ if err != nil {
+ log.Fatalf("Could not parse '%s' as float64: %s", v, err)
+ }
+ return f
+ case "datetime":
+ v := v.(string)
+ t, err := time.Parse("2006-01-02T15:04:05Z", v)
+ if err != nil {
+ log.Fatalf("Could not parse '%s' as a datetime: %s", v, err)
+ }
+ return t
+ case "bool":
+ v := v.(string)
+ switch v {
+ case "true":
+ return true
+ case "false":
+ return false
+ }
+ log.Fatalf("Could not parse '%s' as a boolean.", v)
+ case "array":
+ v := v.([]interface{})
+ array := make([]interface{}, len(v))
+ for i := range v {
+ if m, ok := v[i].(map[string]interface{}); ok {
+ array[i] = untag(m)
+ } else {
+ log.Fatalf("Arrays may only contain other arrays or "+
+ "primitive values, but found a '%T'.", m)
+ }
+ }
+ return array
+ }
+ log.Fatalf("Unrecognized tag type '%s'.", t)
+ panic("unreachable")
+}
+
+func in(key string, m map[string]interface{}) bool {
+ _, ok := m[key]
+ return ok
+}
diff --git a/vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go b/vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go
new file mode 100644
index 00000000..c7d689a7
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go
@@ -0,0 +1,61 @@
+// Command tomlv validates TOML documents and prints each key's type.
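+//
+// Example invocation (the file name is illustrative):
+//
+//	tomlv -types config.toml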
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "path"
+ "strings"
+ "text/tabwriter"
+
+ "github.com/BurntSushi/toml"
+)
+
+var (
+ flagTypes = false
+)
+
+func init() {
+ log.SetFlags(0)
+
+ flag.BoolVar(&flagTypes, "types", flagTypes,
+ "When set, the types of every defined key will be shown.")
+
+ flag.Usage = usage
+ flag.Parse()
+}
+
+func usage() {
+ log.Printf("Usage: %s toml-file [ toml-file ... ]\n",
+ path.Base(os.Args[0]))
+ flag.PrintDefaults()
+
+ os.Exit(1)
+}
+
+func main() {
+ if flag.NArg() < 1 {
+ flag.Usage()
+ }
+ for _, f := range flag.Args() {
+ var tmp interface{}
+ md, err := toml.DecodeFile(f, &tmp)
+ if err != nil {
+ log.Fatalf("Error in '%s': %s", f, err)
+ }
+ if flagTypes {
+ printTypes(md)
+ }
+ }
+}
+
+func printTypes(md toml.MetaData) {
+ tabw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
+ for _, key := range md.Keys() {
+ fmt.Fprintf(tabw, "%s%s\t%s\n",
+ strings.Repeat(" ", len(key)-1), key, md.Type(key...))
+ }
+ tabw.Flush()
+}
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
new file mode 100644
index 00000000..b0fd51d5
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -0,0 +1,509 @@
+package toml
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "reflect"
+ "strings"
+ "time"
+)
+
+func e(format string, args ...interface{}) error {
+ return fmt.Errorf("toml: "+format, args...)
+}
+
+// Unmarshaler is the interface implemented by objects that can unmarshal a
+// TOML description of themselves.
+type Unmarshaler interface {
+ UnmarshalTOML(interface{}) error
+}
+
+// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
+func Unmarshal(p []byte, v interface{}) error {
+ _, err := Decode(string(p), v)
+ return err
+}
+
+// Primitive is a TOML value that hasn't been decoded into a Go value.
+// When using the various `Decode*` functions, the type `Primitive` may
+// be given to any value, and its decoding will be delayed.
+//
+// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
+//
+// The underlying representation of a `Primitive` value is subject to change.
+// Do not rely on it.
+//
+// N.B. Primitive values are still parsed, so using them will only avoid
+// the overhead of reflection. They can be useful when you don't know the
+// exact type of TOML data until run time.
+type Primitive struct {
+ undecoded interface{}
+ context Key
+}
+
+// Deprecated: Use MetaData.PrimitiveDecode instead.
+func PrimitiveDecode(primValue Primitive, v interface{}) error {
+ md := MetaData{decoded: make(map[string]bool)}
+ return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// PrimitiveDecode is just like the other `Decode*` functions, except it
+// decodes a TOML value that has already been parsed. Valid primitive values
+// can *only* be obtained from values filled by the decoder functions,
+// including this method. (i.e., `v` may contain more `Primitive`
+// values.)
+//
+// Meta data for primitive values is included in the meta data returned by
+// the `Decode*` functions with one exception: keys returned by the Undecoded
+// method will only reflect keys that were decoded. Namely, any keys hidden
+// behind a Primitive will be considered undecoded. Executing this method will
+// update the undecoded keys in the meta data. (See the example.)
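+//
+// A sketch of delayed decoding (the names and the `blob` TOML string are
+// illustrative):
+//
+//	var outer struct {
+//		Server toml.Primitive `toml:"server"`
+//	}
+//	md, err := toml.Decode(blob, &outer) // "server" is parsed, not decoded
+//	// ... later, once the concrete type is known:
+//	var server map[string]interface{}
+//	err = md.PrimitiveDecode(outer.Server, &server)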
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
+ md.context = primValue.context
+ defer func() { md.context = nil }()
+ return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// Decode will decode the contents of `data` in TOML format into a pointer
+// `v`.
+//
+// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
+// used interchangeably.)
+//
+// TOML arrays of tables correspond to either a slice of structs or a slice
+// of maps.
+//
+// TOML datetimes correspond to Go `time.Time` values.
+//
+// All other TOML types (float, string, int, bool and array) correspond
+// to the obvious Go types.
+//
+// An exception to the above rules is if a type implements the
+// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
+// (floats, strings, integers, booleans and datetimes) will be converted to
+// a byte string and given to the value's UnmarshalText method. See the
+// Unmarshaler example for a demonstration with time duration strings.
+//
+// Key mapping
+//
+// TOML keys can map to either keys in a Go map or field names in a Go
+// struct. The special `toml` struct tag may be used to map TOML keys to
+// struct fields that don't match the key name exactly. (See the example.)
+// A case-insensitive match on struct field names will be tried if an exact
+// match can't be found.
+//
+// The mapping between TOML values and Go values is loose. That is, there
+// may exist TOML values that cannot be placed into your representation, and
+// there may be parts of your representation that do not correspond to
+// TOML values. This loose mapping can be made stricter by using the IsDefined
+// and/or Undecoded methods on the MetaData returned.
+//
+// This decoder will not handle cyclic types. If a cyclic type is passed,
+// `Decode` will not terminate.
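+//
+// A minimal usage sketch (the struct and TOML literal are illustrative):
+//
+//	type config struct {
+//		Owner string `toml:"owner"`
+//		Ports []int  `toml:"ports"`
+//	}
+//	var c config
+//	if _, err := toml.Decode("owner = \"anna\"\nports = [8001, 8002]", &c); err != nil {
+//		// handle error
+//	}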
+func Decode(data string, v interface{}) (MetaData, error) {
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr {
+ return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
+ }
+ if rv.IsNil() {
+ return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
+ }
+ p, err := parse(data)
+ if err != nil {
+ return MetaData{}, err
+ }
+ md := MetaData{
+ p.mapping, p.types, p.ordered,
+ make(map[string]bool, len(p.ordered)), nil,
+ }
+ return md, md.unify(p.mapping, indirect(rv))
+}
+
+// DecodeFile is just like Decode, except it will automatically read the
+// contents of the file at `fpath` and decode it for you.
+func DecodeFile(fpath string, v interface{}) (MetaData, error) {
+ bs, err := ioutil.ReadFile(fpath)
+ if err != nil {
+ return MetaData{}, err
+ }
+ return Decode(string(bs), v)
+}
+
+// DecodeReader is just like Decode, except it will consume all bytes
+// from the reader and decode it for you.
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
+ bs, err := ioutil.ReadAll(r)
+ if err != nil {
+ return MetaData{}, err
+ }
+ return Decode(string(bs), v)
+}
+
+// unify performs a sort of type unification based on the structure of `rv`,
+// which is the client representation.
+//
+// Any type mismatch produces an error. Finding a type that we don't know
+// how to handle produces an unsupported type error.
+func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
+
+ // Special case. Look for a `Primitive` value.
+ if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
+ // Save the undecoded data and the key context into the primitive
+ // value.
+ context := make(Key, len(md.context))
+ copy(context, md.context)
+ rv.Set(reflect.ValueOf(Primitive{
+ undecoded: data,
+ context: context,
+ }))
+ return nil
+ }
+
+ // Special case. Unmarshaler Interface support.
+ if rv.CanAddr() {
+ if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
+ return v.UnmarshalTOML(data)
+ }
+ }
+
+ // Special case. Handle time.Time values specifically.
+ // TODO: Remove this code when we decide to drop support for Go 1.1.
+ // This isn't necessary in Go 1.2 because time.Time satisfies the encoding
+ // interfaces.
+ if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
+ return md.unifyDatetime(data, rv)
+ }
+
+ // Special case. Look for a value satisfying the TextUnmarshaler interface.
+ if v, ok := rv.Interface().(TextUnmarshaler); ok {
+ return md.unifyText(data, v)
+ }
+ // BUG(burntsushi)
+ // The behavior here is incorrect whenever a Go type satisfies the
+ // encoding.TextUnmarshaler interface but also corresponds to a TOML
+ // hash or array. In particular, the unmarshaler should only be applied
+ // to primitive TOML values. But at this point, it will be applied to
+ // all kinds of values and produce an incorrect error whenever those values
+ // are hashes or arrays (including arrays of tables).
+
+ k := rv.Kind()
+
+ // All integer kinds (Int through Uint64) occupy a contiguous range of
+ // reflect.Kind values, so handle them with a single range check.
+ if k >= reflect.Int && k <= reflect.Uint64 {
+ return md.unifyInt(data, rv)
+ }
+ switch k {
+ case reflect.Ptr:
+ elem := reflect.New(rv.Type().Elem())
+ err := md.unify(data, reflect.Indirect(elem))
+ if err != nil {
+ return err
+ }
+ rv.Set(elem)
+ return nil
+ case reflect.Struct:
+ return md.unifyStruct(data, rv)
+ case reflect.Map:
+ return md.unifyMap(data, rv)
+ case reflect.Array:
+ return md.unifyArray(data, rv)
+ case reflect.Slice:
+ return md.unifySlice(data, rv)
+ case reflect.String:
+ return md.unifyString(data, rv)
+ case reflect.Bool:
+ return md.unifyBool(data, rv)
+ case reflect.Interface:
+ // we only support empty interfaces.
+ if rv.NumMethod() > 0 {
+ return e("unsupported type %s", rv.Type())
+ }
+ return md.unifyAnything(data, rv)
+ case reflect.Float32:
+ fallthrough
+ case reflect.Float64:
+ return md.unifyFloat64(data, rv)
+ }
+ return e("unsupported type %s", rv.Kind())
+}
+
+func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
+ tmap, ok := mapping.(map[string]interface{})
+ if !ok {
+ if mapping == nil {
+ return nil
+ }
+ return e("type mismatch for %s: expected table but found %T",
+ rv.Type().String(), mapping)
+ }
+
+ for key, datum := range tmap {
+ var f *field
+ fields := cachedTypeFields(rv.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if ff.name == key {
+ f = ff
+ break
+ }
+ if f == nil && strings.EqualFold(ff.name, key) {
+ f = ff
+ }
+ }
+ if f != nil {
+ subv := rv
+ for _, i := range f.index {
+ subv = indirect(subv.Field(i))
+ }
+ if isUnifiable(subv) {
+ md.decoded[md.context.add(key).String()] = true
+ md.context = append(md.context, key)
+ if err := md.unify(datum, subv); err != nil {
+ return err
+ }
+ md.context = md.context[0 : len(md.context)-1]
+ } else if f.name != "" {
+ // Bad user! No soup for you!
+ return e("cannot write unexported field %s.%s",
+ rv.Type().String(), f.name)
+ }
+ }
+ }
+ return nil
+}
+
+func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
+ tmap, ok := mapping.(map[string]interface{})
+ if !ok {
+ if tmap == nil {
+ return nil
+ }
+ return badtype("map", mapping)
+ }
+ if rv.IsNil() {
+ rv.Set(reflect.MakeMap(rv.Type()))
+ }
+ for k, v := range tmap {
+ md.decoded[md.context.add(k).String()] = true
+ md.context = append(md.context, k)
+
+ rvkey := indirect(reflect.New(rv.Type().Key()))
+ rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
+ if err := md.unify(v, rvval); err != nil {
+ return err
+ }
+ md.context = md.context[0 : len(md.context)-1]
+
+ rvkey.SetString(k)
+ rv.SetMapIndex(rvkey, rvval)
+ }
+ return nil
+}
+
+func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
+ datav := reflect.ValueOf(data)
+ if datav.Kind() != reflect.Slice {
+ if !datav.IsValid() {
+ return nil
+ }
+ return badtype("slice", data)
+ }
+ sliceLen := datav.Len()
+ if sliceLen != rv.Len() {
+ return e("expected array length %d; got TOML array of length %d",
+ rv.Len(), sliceLen)
+ }
+ return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
+ datav := reflect.ValueOf(data)
+ if datav.Kind() != reflect.Slice {
+ if !datav.IsValid() {
+ return nil
+ }
+ return badtype("slice", data)
+ }
+ n := datav.Len()
+ if rv.IsNil() || rv.Cap() < n {
+ rv.Set(reflect.MakeSlice(rv.Type(), n, n))
+ }
+ rv.SetLen(n)
+ return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
+ sliceLen := data.Len()
+ for i := 0; i < sliceLen; i++ {
+ v := data.Index(i).Interface()
+ sliceval := indirect(rv.Index(i))
+ if err := md.unify(v, sliceval); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
+ if _, ok := data.(time.Time); ok {
+ rv.Set(reflect.ValueOf(data))
+ return nil
+ }
+ return badtype("time.Time", data)
+}
+
+func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
+ if s, ok := data.(string); ok {
+ rv.SetString(s)
+ return nil
+ }
+ return badtype("string", data)
+}
+
+func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
+ if num, ok := data.(float64); ok {
+ switch rv.Kind() {
+ case reflect.Float32:
+ fallthrough
+ case reflect.Float64:
+ rv.SetFloat(num)
+ default:
+ panic("bug")
+ }
+ return nil
+ }
+ return badtype("float", data)
+}
+
+func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
+ if num, ok := data.(int64); ok {
+ if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
+ switch rv.Kind() {
+ case reflect.Int, reflect.Int64:
+ // No bounds checking necessary.
+ case reflect.Int8:
+ if num < math.MinInt8 || num > math.MaxInt8 {
+ return e("value %d is out of range for int8", num)
+ }
+ case reflect.Int16:
+ if num < math.MinInt16 || num > math.MaxInt16 {
+ return e("value %d is out of range for int16", num)
+ }
+ case reflect.Int32:
+ if num < math.MinInt32 || num > math.MaxInt32 {
+ return e("value %d is out of range for int32", num)
+ }
+ }
+ rv.SetInt(num)
+ } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
+ unum := uint64(num)
+ switch rv.Kind() {
+ case reflect.Uint, reflect.Uint64:
+ // No bounds checking necessary.
+ case reflect.Uint8:
+ if num < 0 || unum > math.MaxUint8 {
+ return e("value %d is out of range for uint8", num)
+ }
+ case reflect.Uint16:
+ if num < 0 || unum > math.MaxUint16 {
+ return e("value %d is out of range for uint16", num)
+ }
+ case reflect.Uint32:
+ if num < 0 || unum > math.MaxUint32 {
+ return e("value %d is out of range for uint32", num)
+ }
+ }
+ rv.SetUint(unum)
+ } else {
+ panic("unreachable")
+ }
+ return nil
+ }
+ return badtype("integer", data)
+}
+
+func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
+ if b, ok := data.(bool); ok {
+ rv.SetBool(b)
+ return nil
+ }
+ return badtype("boolean", data)
+}
+
+func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
+ rv.Set(reflect.ValueOf(data))
+ return nil
+}
+
+func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
+ var s string
+ switch sdata := data.(type) {
+ case TextMarshaler:
+ text, err := sdata.MarshalText()
+ if err != nil {
+ return err
+ }
+ s = string(text)
+ case fmt.Stringer:
+ s = sdata.String()
+ case string:
+ s = sdata
+ case bool:
+ s = fmt.Sprintf("%v", sdata)
+ case int64:
+ s = fmt.Sprintf("%d", sdata)
+ case float64:
+ s = fmt.Sprintf("%f", sdata)
+ default:
+ return badtype("primitive (string-like)", data)
+ }
+ if err := v.UnmarshalText([]byte(s)); err != nil {
+ return err
+ }
+ return nil
+}
+
+// rvalue returns a reflect.Value of `v`. All pointers are resolved.
+func rvalue(v interface{}) reflect.Value {
+ return indirect(reflect.ValueOf(v))
+}
+
+// indirect returns the value pointed to by a pointer.
+// Pointers are followed until the value is not a pointer.
+// New values are allocated for each nil pointer.
+//
+// An exception to this rule is if the value satisfies an interface of
+// interest to us (like encoding.TextUnmarshaler).
+func indirect(v reflect.Value) reflect.Value {
+ if v.Kind() != reflect.Ptr {
+ if v.CanSet() {
+ pv := v.Addr()
+ if _, ok := pv.Interface().(TextUnmarshaler); ok {
+ return pv
+ }
+ }
+ return v
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ return indirect(reflect.Indirect(v))
+}
+
+func isUnifiable(rv reflect.Value) bool {
+ if rv.CanSet() {
+ return true
+ }
+ if _, ok := rv.Interface().(TextUnmarshaler); ok {
+ return true
+ }
+ return false
+}
+
+func badtype(expected string, data interface{}) error {
+ return e("cannot load TOML value of type %T into a Go %s", data, expected)
+}
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go
new file mode 100644
index 00000000..b9914a67
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode_meta.go
@@ -0,0 +1,121 @@
+package toml
+
+import "strings"
+
+// MetaData allows access to meta information about TOML data that may not
+// be inferrable via reflection. In particular, whether a key has been defined
+// and the TOML type of a key.
+type MetaData struct {
+ mapping map[string]interface{}
+ types map[string]tomlType
+ keys []Key
+ decoded map[string]bool
+ context Key // Used only during decoding.
+}
+
+// IsDefined returns true if the key given exists in the TOML data. The key
+// should be specified hierarchially. e.g.,
+//
+// // access the TOML key 'a.b.c'
+// IsDefined("a", "b", "c")
+//
+// IsDefined will return false if an empty key is given. Keys are case sensitive.
+func (md *MetaData) IsDefined(key ...string) bool {
+ if len(key) == 0 {
+ return false
+ }
+
+ var hash map[string]interface{}
+ var ok bool
+ var hashOrVal interface{} = md.mapping
+ for _, k := range key {
+ if hash, ok = hashOrVal.(map[string]interface{}); !ok {
+ return false
+ }
+ if hashOrVal, ok = hash[k]; !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// Type returns a string representation of the type of the key specified.
+//
+// Type will return the empty string if given an empty key or a key that
+// does not exist. Keys are case sensitive.
+func (md *MetaData) Type(key ...string) string {
+ fullkey := strings.Join(key, ".")
+ if typ, ok := md.types[fullkey]; ok {
+ return typ.typeString()
+ }
+ return ""
+}
+
+// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
+// to get values of this type.
+type Key []string
+
+func (k Key) String() string {
+ return strings.Join(k, ".")
+}
+
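+// maybeQuotedAll renders the key with each piece quoted only when needed.
+// For example, Key{"servers", "alpha beta"} renders as servers."alpha beta",
+// since a space is not a bare key character.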
+func (k Key) maybeQuotedAll() string {
+ var ss []string
+ for i := range k {
+ ss = append(ss, k.maybeQuoted(i))
+ }
+ return strings.Join(ss, ".")
+}
+
+func (k Key) maybeQuoted(i int) string {
+ quote := false
+ for _, c := range k[i] {
+ if !isBareKeyChar(c) {
+ quote = true
+ break
+ }
+ }
+ if quote {
+ return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
+ }
+ return k[i]
+}
+
+func (k Key) add(piece string) Key {
+ newKey := make(Key, len(k)+1)
+ copy(newKey, k)
+ newKey[len(k)] = piece
+ return newKey
+}
+
+// Keys returns a slice of every key in the TOML data, including key groups.
+// Each key is itself a slice, where the first element is the top of the
+// hierarchy and the last is the most specific.
+//
+// The list will have the same order as the keys appeared in the TOML data.
+//
+// All keys returned are non-empty.
+func (md *MetaData) Keys() []Key {
+ return md.keys
+}
+
+// Undecoded returns all keys that have not been decoded in the order in which
+// they appear in the original TOML document.
+//
+// This includes keys that haven't been decoded because of a Primitive value.
+// Once the Primitive value is decoded, the keys will be considered decoded.
+//
+// Also note that decoding into an empty interface will result in no decoding,
+// and so no keys will be considered decoded.
+//
+// In this sense, the Undecoded keys correspond to keys in the TOML document
+// that do not have a concrete type in your representation.
+func (md *MetaData) Undecoded() []Key {
+ undecoded := make([]Key, 0, len(md.keys))
+ for _, key := range md.keys {
+ if !md.decoded[key.String()] {
+ undecoded = append(undecoded, key)
+ }
+ }
+ return undecoded
+}
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
new file mode 100644
index 00000000..fe268000
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/doc.go
@@ -0,0 +1,27 @@
+/*
+Package toml provides facilities for decoding and encoding TOML configuration
+files via reflection. There is also support for delaying decoding with
+the Primitive type, and querying the set of keys in a TOML document with the
+MetaData type.
+
+The specification implemented: https://github.com/mojombo/toml
+
+The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
+whether a file is a valid TOML document. It can also be used to print the
+type of each key in a TOML document.
+
+Testing
+
+There are two important types of tests used for this package. The first is
+contained inside '*_test.go' files and uses the standard Go unit testing
+framework. These tests are primarily devoted to holistically testing the
+decoder and encoder.
+
+The second type of testing is used to verify the implementation's adherence
+to the TOML specification. These tests have been factored into their own
+project: https://github.com/BurntSushi/toml-test
+
+The reason the tests are in a separate project is so that they can be used by
+any implementation of TOML, as the test suite itself is language agnostic.
+*/
+package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
new file mode 100644
index 00000000..0f2558b2
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -0,0 +1,568 @@
+package toml
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type tomlEncodeError struct{ error }
+
+var (
+ errArrayMixedElementTypes = errors.New(
+ "toml: cannot encode array with mixed element types")
+ errArrayNilElement = errors.New(
+ "toml: cannot encode array with nil element")
+ errNonString = errors.New(
+ "toml: cannot encode a map with non-string key type")
+ errAnonNonStruct = errors.New(
+ "toml: cannot encode an anonymous field that is not a struct")
+ errArrayNoTable = errors.New(
+ "toml: TOML array element cannot contain a table")
+ errNoKey = errors.New(
+ "toml: top-level values must be Go maps or structs")
+ errAnything = errors.New("") // used in testing
+)
+
+var quotedReplacer = strings.NewReplacer(
+ "\t", "\\t",
+ "\n", "\\n",
+ "\r", "\\r",
+ "\"", "\\\"",
+ "\\", "\\\\",
+)
+
+// Encoder controls the encoding of Go values to a TOML document to some
+// io.Writer.
+//
+// The indentation level can be controlled with the Indent field.
+type Encoder struct {
+ // A single indentation level. By default it is two spaces.
+ Indent string
+
+ // hasWritten is whether we have written any output to w yet.
+ hasWritten bool
+ w *bufio.Writer
+}
+
+// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
+// given. By default, a single indentation level is 2 spaces.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ w: bufio.NewWriter(w),
+ Indent: " ",
+ }
+}
+
+// Encode writes a TOML representation of the Go value to the underlying
+// io.Writer. If the value given cannot be encoded to a valid TOML document,
+// then an error is returned.
+//
+// The mapping between Go values and TOML values should be precisely the same
+// as for the Decode* functions. Similarly, the TextMarshaler interface is
+// supported by encoding the resulting bytes as strings. (If you want to write
+// arbitrary binary data then you will need to use something like base64 since
+// TOML does not have any binary types.)
+//
+// When encoding TOML hashes (i.e., Go maps or structs), keys without any
+// sub-hashes are encoded first.
+//
+// If a Go map is encoded, then its keys are sorted alphabetically for
+// deterministic output. More control over this behavior may be provided if
+// there is demand for it.
+//
+// Encoding Go values without a corresponding TOML representation---like map
+// types with non-string keys---will cause an error to be returned. Similarly
+// for mixed arrays/slices, arrays/slices with nil elements, embedded
+// non-struct types and nested slices containing maps or structs.
+// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
+// and so is []map[string][]string.)
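+//
+// A minimal usage sketch (the encoded value is illustrative):
+//
+//	var buf bytes.Buffer
+//	err := toml.NewEncoder(&buf).Encode(struct {
+//		Name string `toml:"name"`
+//	}{"toml"})
+//	// on success, buf.String() is: name = "toml"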
+func (enc *Encoder) Encode(v interface{}) error {
+ rv := eindirect(reflect.ValueOf(v))
+ if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+ return err
+ }
+ return enc.w.Flush()
+}
+
+func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if terr, ok := r.(tomlEncodeError); ok {
+ err = terr.error
+ return
+ }
+ panic(r)
+ }
+ }()
+ enc.encode(key, rv)
+ return nil
+}
+
+func (enc *Encoder) encode(key Key, rv reflect.Value) {
+ // Special case: time needs to be in ISO 8601 format.
+ // Special case: if we can marshal the type to text, then we use that.
+ // This prevents the encoder from handling these types as generic
+ // structs (or whatever the underlying type of a TextMarshaler is).
+ switch rv.Interface().(type) {
+ case time.Time, TextMarshaler:
+ enc.keyEqElement(key, rv)
+ return
+ }
+
+ k := rv.Kind()
+ switch k {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+ reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+ reflect.Uint64,
+ reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
+ enc.keyEqElement(key, rv)
+ case reflect.Array, reflect.Slice:
+ if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
+ enc.eArrayOfTables(key, rv)
+ } else {
+ enc.keyEqElement(key, rv)
+ }
+ case reflect.Interface:
+ if rv.IsNil() {
+ return
+ }
+ enc.encode(key, rv.Elem())
+ case reflect.Map:
+ if rv.IsNil() {
+ return
+ }
+ enc.eTable(key, rv)
+ case reflect.Ptr:
+ if rv.IsNil() {
+ return
+ }
+ enc.encode(key, rv.Elem())
+ case reflect.Struct:
+ enc.eTable(key, rv)
+ default:
+ panic(e("unsupported type for key '%s': %s", key, k))
+ }
+}
+
+// eElement encodes any value that can be an array element (primitives and
+// arrays).
+func (enc *Encoder) eElement(rv reflect.Value) {
+ switch v := rv.Interface().(type) {
+ case time.Time:
+ // Special case time.Time as a primitive. Has to come before
+ // TextMarshaler below because time.Time implements
+ // encoding.TextMarshaler, but we need to always use UTC.
+ enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
+ return
+ case TextMarshaler:
+ // Special case. Use text marshaler if it's available for this value.
+ if s, err := v.MarshalText(); err != nil {
+ encPanic(err)
+ } else {
+ enc.writeQuoted(string(s))
+ }
+ return
+ }
+ switch rv.Kind() {
+ case reflect.Bool:
+ enc.wf(strconv.FormatBool(rv.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+ reflect.Int64:
+ enc.wf(strconv.FormatInt(rv.Int(), 10))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16,
+ reflect.Uint32, reflect.Uint64:
+ enc.wf(strconv.FormatUint(rv.Uint(), 10))
+ case reflect.Float32:
+ enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
+ case reflect.Float64:
+ enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
+ case reflect.Array, reflect.Slice:
+ enc.eArrayOrSliceElement(rv)
+ case reflect.Interface:
+ enc.eElement(rv.Elem())
+ case reflect.String:
+ enc.writeQuoted(rv.String())
+ default:
+ panic(e("unexpected primitive type: %s", rv.Kind()))
+ }
+}
+
+// By the TOML spec, all floats must have a decimal point with at least one
+// digit on either side.
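+// For example, floatAddDecimal("3") returns "3.0", while
+// floatAddDecimal("0.25") is returned unchanged.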
+func floatAddDecimal(fstr string) string {
+ if !strings.Contains(fstr, ".") {
+ return fstr + ".0"
+ }
+ return fstr
+}
+
+func (enc *Encoder) writeQuoted(s string) {
+ enc.wf("\"%s\"", quotedReplacer.Replace(s))
+}
+
+func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
+ length := rv.Len()
+ enc.wf("[")
+ for i := 0; i < length; i++ {
+ elem := rv.Index(i)
+ enc.eElement(elem)
+ if i != length-1 {
+ enc.wf(", ")
+ }
+ }
+ enc.wf("]")
+}
+
+func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
+ if len(key) == 0 {
+ encPanic(errNoKey)
+ }
+ for i := 0; i < rv.Len(); i++ {
+ trv := rv.Index(i)
+ if isNil(trv) {
+ continue
+ }
+ panicIfInvalidKey(key)
+ enc.newline()
+ enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
+ enc.newline()
+ enc.eMapOrStruct(key, trv)
+ }
+}
+
+func (enc *Encoder) eTable(key Key, rv reflect.Value) {
+ panicIfInvalidKey(key)
+ if len(key) == 1 {
+ // Output an extra new line between top-level tables.
+ // (The newline isn't written if nothing else has been written though.)
+ enc.newline()
+ }
+ if len(key) > 0 {
+ enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
+ enc.newline()
+ }
+ enc.eMapOrStruct(key, rv)
+}
+
+func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
+ switch rv := eindirect(rv); rv.Kind() {
+ case reflect.Map:
+ enc.eMap(key, rv)
+ case reflect.Struct:
+ enc.eStruct(key, rv)
+ default:
+ panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
+ }
+}
+
+func (enc *Encoder) eMap(key Key, rv reflect.Value) {
+ rt := rv.Type()
+ if rt.Key().Kind() != reflect.String {
+ encPanic(errNonString)
+ }
+
+ // Sort keys so that we have deterministic output. And write keys directly
+ // underneath this key first, before writing sub-structs or sub-maps.
+ var mapKeysDirect, mapKeysSub []string
+ for _, mapKey := range rv.MapKeys() {
+ k := mapKey.String()
+ if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
+ mapKeysSub = append(mapKeysSub, k)
+ } else {
+ mapKeysDirect = append(mapKeysDirect, k)
+ }
+ }
+
+ var writeMapKeys = func(mapKeys []string) {
+ sort.Strings(mapKeys)
+ for _, mapKey := range mapKeys {
+ mrv := rv.MapIndex(reflect.ValueOf(mapKey))
+ if isNil(mrv) {
+ // Don't write anything for nil fields.
+ continue
+ }
+ enc.encode(key.add(mapKey), mrv)
+ }
+ }
+ writeMapKeys(mapKeysDirect)
+ writeMapKeys(mapKeysSub)
+}
+
+func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
+ // Write keys for fields directly under this key first, because if we write
+ // a field that creates a new table, then all keys under it will be in that
+ // table (not the one we're writing here).
+ rt := rv.Type()
+ var fieldsDirect, fieldsSub [][]int
+ var addFields func(rt reflect.Type, rv reflect.Value, start []int)
+ addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
+ for i := 0; i < rt.NumField(); i++ {
+ f := rt.Field(i)
+ // skip unexported fields
+ if f.PkgPath != "" && !f.Anonymous {
+ continue
+ }
+ frv := rv.Field(i)
+ if f.Anonymous {
+ t := f.Type
+ switch t.Kind() {
+ case reflect.Struct:
+ // Treat anonymous struct fields with
+ // tag names as though they are not
+ // anonymous, like encoding/json does.
+ if getOptions(f.Tag).name == "" {
+ addFields(t, frv, f.Index)
+ continue
+ }
+ case reflect.Ptr:
+ if t.Elem().Kind() == reflect.Struct &&
+ getOptions(f.Tag).name == "" {
+ if !frv.IsNil() {
+ addFields(t.Elem(), frv.Elem(), f.Index)
+ }
+ continue
+ }
+ // Fall through to the normal field encoding logic below
+ // for non-struct anonymous fields.
+ }
+ }
+
+ if typeIsHash(tomlTypeOfGo(frv)) {
+ fieldsSub = append(fieldsSub, append(start, f.Index...))
+ } else {
+ fieldsDirect = append(fieldsDirect, append(start, f.Index...))
+ }
+ }
+ }
+ addFields(rt, rv, nil)
+
+ var writeFields = func(fields [][]int) {
+ for _, fieldIndex := range fields {
+ sft := rt.FieldByIndex(fieldIndex)
+ sf := rv.FieldByIndex(fieldIndex)
+ if isNil(sf) {
+ // Don't write anything for nil fields.
+ continue
+ }
+
+ opts := getOptions(sft.Tag)
+ if opts.skip {
+ continue
+ }
+ keyName := sft.Name
+ if opts.name != "" {
+ keyName = opts.name
+ }
+ if opts.omitempty && isEmpty(sf) {
+ continue
+ }
+ if opts.omitzero && isZero(sf) {
+ continue
+ }
+
+ enc.encode(key.add(keyName), sf)
+ }
+ }
+ writeFields(fieldsDirect)
+ writeFields(fieldsSub)
+}
+
+// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
+// which means no concrete TOML type could be found (e.g., for a nil value).
+// tomlArrayType relies on this to determine whether the types of array
+// elements are mixed (which is forbidden) and to reject nil array elements.
+func tomlTypeOfGo(rv reflect.Value) tomlType {
+ if isNil(rv) || !rv.IsValid() {
+ return nil
+ }
+ switch rv.Kind() {
+ case reflect.Bool:
+ return tomlBool
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+ reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+ reflect.Uint64:
+ return tomlInteger
+ case reflect.Float32, reflect.Float64:
+ return tomlFloat
+ case reflect.Array, reflect.Slice:
+ if typeEqual(tomlHash, tomlArrayType(rv)) {
+ return tomlArrayHash
+ }
+ return tomlArray
+ case reflect.Ptr, reflect.Interface:
+ return tomlTypeOfGo(rv.Elem())
+ case reflect.String:
+ return tomlString
+ case reflect.Map:
+ return tomlHash
+ case reflect.Struct:
+ switch rv.Interface().(type) {
+ case time.Time:
+ return tomlDatetime
+ case TextMarshaler:
+ return tomlString
+ default:
+ return tomlHash
+ }
+ default:
+ panic("unexpected reflect.Kind: " + rv.Kind().String())
+ }
+}
+
+// tomlArrayType returns the element type of a TOML array. The type returned
+// may be nil if it cannot be determined (e.g., a nil slice or a zero-length
+// slice). This function may also panic if it finds a type that cannot be
+// expressed in TOML (such as nil elements, heterogeneous arrays or directly
+// nested arrays of tables).
+func tomlArrayType(rv reflect.Value) tomlType {
+ if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
+ return nil
+ }
+ firstType := tomlTypeOfGo(rv.Index(0))
+ if firstType == nil {
+ encPanic(errArrayNilElement)
+ }
+
+ rvlen := rv.Len()
+ for i := 1; i < rvlen; i++ {
+ elem := rv.Index(i)
+ switch elemType := tomlTypeOfGo(elem); {
+ case elemType == nil:
+ encPanic(errArrayNilElement)
+ case !typeEqual(firstType, elemType):
+ encPanic(errArrayMixedElementTypes)
+ }
+ }
+ // If we have a nested array, then we must make sure that the nested
+ // array contains ONLY primitives.
+ // This checks arbitrarily nested arrays.
+ if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
+ nest := tomlArrayType(eindirect(rv.Index(0)))
+ if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
+ encPanic(errArrayNoTable)
+ }
+ }
+ return firstType
+}
+
+type tagOptions struct {
+ skip bool // "-"
+ name string
+ omitempty bool
+ omitzero bool
+}
+
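+// getOptions parses a `toml:"..."` struct tag. For example, a field tagged
+// `toml:"addr,omitempty"` yields tagOptions{name: "addr", omitempty: true},
+// and `toml:"-"` yields tagOptions{skip: true}.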
+func getOptions(tag reflect.StructTag) tagOptions {
+ t := tag.Get("toml")
+ if t == "-" {
+ return tagOptions{skip: true}
+ }
+ var opts tagOptions
+ parts := strings.Split(t, ",")
+ opts.name = parts[0]
+ for _, s := range parts[1:] {
+ switch s {
+ case "omitempty":
+ opts.omitempty = true
+ case "omitzero":
+ opts.omitzero = true
+ }
+ }
+ return opts
+}
+
+func isZero(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return rv.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return rv.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return rv.Float() == 0.0
+ }
+ return false
+}
+
+func isEmpty(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+ return rv.Len() == 0
+ case reflect.Bool:
+ return !rv.Bool()
+ }
+ return false
+}
+
+func (enc *Encoder) newline() {
+ if enc.hasWritten {
+ enc.wf("\n")
+ }
+}
+
+func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
+ if len(key) == 0 {
+ encPanic(errNoKey)
+ }
+ panicIfInvalidKey(key)
+ enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
+ enc.eElement(val)
+ enc.newline()
+}
+
+func (enc *Encoder) wf(format string, v ...interface{}) {
+ if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
+ encPanic(err)
+ }
+ enc.hasWritten = true
+}
+
+func (enc *Encoder) indentStr(key Key) string {
+ return strings.Repeat(enc.Indent, len(key)-1)
+}
+
+func encPanic(err error) {
+ panic(tomlEncodeError{err})
+}
+
+func eindirect(v reflect.Value) reflect.Value {
+ switch v.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ return eindirect(v.Elem())
+ default:
+ return v
+ }
+}
+
+func isNil(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return rv.IsNil()
+ default:
+ return false
+ }
+}
+
+func panicIfInvalidKey(key Key) {
+ for _, k := range key {
+ if len(k) == 0 {
+ encPanic(e("Key '%s' is not a valid table name. Key names "+
+ "cannot be empty.", key.maybeQuotedAll()))
+ }
+ }
+}
+
+func isValidKeyName(s string) bool {
+ return len(s) != 0
+}
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go
new file mode 100644
index 00000000..d36e1dd6
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encoding_types.go
@@ -0,0 +1,19 @@
+// +build go1.2
+
+package toml
+
+// In order to support Go 1.1, we define our own TextMarshaler and
+// TextUnmarshaler types. For Go 1.2+, we just alias them with the
+// standard library interfaces.
+
+import (
+ "encoding"
+)
+
+// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
+// so that Go 1.1 can be supported.
+type TextMarshaler encoding.TextMarshaler
+
+// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
+// here so that Go 1.1 can be supported.
+type TextUnmarshaler encoding.TextUnmarshaler
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
new file mode 100644
index 00000000..e8d503d0
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
@@ -0,0 +1,18 @@
+// +build !go1.2
+
+package toml
+
+// These interfaces were introduced in Go 1.2, so we add them manually when
+// compiling for Go 1.1.
+
+// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
+// so that Go 1.1 can be supported.
+type TextMarshaler interface {
+ MarshalText() (text []byte, err error)
+}
+
+// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
+// here so that Go 1.1 can be supported.
+type TextUnmarshaler interface {
+ UnmarshalText(text []byte) error
+}
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
new file mode 100644
index 00000000..104ebda2
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -0,0 +1,858 @@
+package toml
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+type itemType int
+
+const (
+ itemError itemType = iota
+ itemNIL // used in the parser to indicate no type
+ itemEOF
+ itemText
+ itemString
+ itemRawString
+ itemMultilineString
+ itemRawMultilineString
+ itemBool
+ itemInteger
+ itemFloat
+ itemDatetime
+ itemArray // the start of an array
+ itemArrayEnd
+ itemTableStart
+ itemTableEnd
+ itemArrayTableStart
+ itemArrayTableEnd
+ itemKeyStart
+ itemCommentStart
+)
+
+const (
+ eof = 0
+ tableStart = '['
+ tableEnd = ']'
+ arrayTableStart = '['
+ arrayTableEnd = ']'
+ tableSep = '.'
+ keySep = '='
+ arrayStart = '['
+ arrayEnd = ']'
+ arrayValTerm = ','
+ commentStart = '#'
+ stringStart = '"'
+ stringEnd = '"'
+ rawStringStart = '\''
+ rawStringEnd = '\''
+)
+
+type stateFn func(lx *lexer) stateFn
+
+type lexer struct {
+ input string
+ start int
+ pos int
+ width int
+ line int
+ state stateFn
+ items chan item
+
+ // A stack of state functions used to maintain context.
+ // The idea is to reuse parts of the state machine in various places.
+ // For example, values can appear at the top level or within arbitrarily
+ // nested arrays. The last state on the stack is used after a value has
+ // been lexed. Similarly for comments.
+ stack []stateFn
+}
+
+type item struct {
+ typ itemType
+ val string
+ line int
+}
+
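+// nextItem pulls the next item from the lexer, running state functions on
+// demand: the machine advances only far enough to buffer at least one item,
+// so no lexing happens ahead of what the parser has requested.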
+func (lx *lexer) nextItem() item {
+ for {
+ select {
+ case item := <-lx.items:
+ return item
+ default:
+ lx.state = lx.state(lx)
+ }
+ }
+}
+
+func lex(input string) *lexer {
+ lx := &lexer{
+ input: input + "\n",
+ state: lexTop,
+ line: 1,
+ items: make(chan item, 10),
+ stack: make([]stateFn, 0, 10),
+ }
+ return lx
+}
+
+func (lx *lexer) push(state stateFn) {
+ lx.stack = append(lx.stack, state)
+}
+
+func (lx *lexer) pop() stateFn {
+ if len(lx.stack) == 0 {
+ return lx.errorf("BUG in lexer: no states to pop.")
+ }
+ last := lx.stack[len(lx.stack)-1]
+ lx.stack = lx.stack[0 : len(lx.stack)-1]
+ return last
+}
+
+func (lx *lexer) current() string {
+ return lx.input[lx.start:lx.pos]
+}
+
+func (lx *lexer) emit(typ itemType) {
+ lx.items <- item{typ, lx.current(), lx.line}
+ lx.start = lx.pos
+}
+
+func (lx *lexer) emitTrim(typ itemType) {
+ lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
+ lx.start = lx.pos
+}
+
+func (lx *lexer) next() (r rune) {
+ if lx.pos >= len(lx.input) {
+ lx.width = 0
+ return eof
+ }
+
+ if lx.input[lx.pos] == '\n' {
+ lx.line++
+ }
+ r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:])
+ lx.pos += lx.width
+ return r
+}
+
+// ignore skips over the pending input before this point.
+func (lx *lexer) ignore() {
+ lx.start = lx.pos
+}
+
+// backup steps back one rune. Can be called only once per call of next.
+func (lx *lexer) backup() {
+ lx.pos -= lx.width
+ if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
+ lx.line--
+ }
+}
+
+// accept consumes the next rune if it's equal to `valid`.
+func (lx *lexer) accept(valid rune) bool {
+ if lx.next() == valid {
+ return true
+ }
+ lx.backup()
+ return false
+}
+
+// peek returns but does not consume the next rune in the input.
+func (lx *lexer) peek() rune {
+ r := lx.next()
+ lx.backup()
+ return r
+}
+
+// skip ignores all input that matches the given predicate.
+func (lx *lexer) skip(pred func(rune) bool) {
+ for {
+ r := lx.next()
+ if pred(r) {
+ continue
+ }
+ lx.backup()
+ lx.ignore()
+ return
+ }
+}
+
+// errorf stops all lexing by emitting an error and returning `nil`.
+// Call sites typically format rune values with %q so that special
+// characters (new lines, tabs, etc.) appear escaped in the message.
+func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
+ lx.items <- item{
+ itemError,
+ fmt.Sprintf(format, values...),
+ lx.line,
+ }
+ return nil
+}
+
+// lexTop consumes elements at the top level of TOML data.
+func lexTop(lx *lexer) stateFn {
+ r := lx.next()
+ if isWhitespace(r) || isNL(r) {
+ return lexSkip(lx, lexTop)
+ }
+
+ switch r {
+ case commentStart:
+ lx.push(lexTop)
+ return lexCommentStart
+ case tableStart:
+ return lexTableStart
+ case eof:
+ if lx.pos > lx.start {
+ return lx.errorf("Unexpected EOF.")
+ }
+ lx.emit(itemEOF)
+ return nil
+ }
+
+ // At this point, the only valid item can be a key, so we back up
+ // and let the key lexer do the rest.
+ lx.backup()
+ lx.push(lexTopEnd)
+ return lexKeyStart
+}
+
+// lexTopEnd is entered whenever a top-level item has been consumed. (A value
+// or a table.) It must see only whitespace, and will turn back to lexTop
+// upon a new line. If it sees EOF, it will quit the lexer successfully.
+func lexTopEnd(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == commentStart:
+ // a comment will read to a new line for us.
+ lx.push(lexTop)
+ return lexCommentStart
+ case isWhitespace(r):
+ return lexTopEnd
+ case isNL(r):
+ lx.ignore()
+ return lexTop
+ case r == eof:
+ lx.ignore()
+ return lexTop
+ }
+ return lx.errorf("Expected a top-level item to end with a new line, "+
+ "comment or EOF, but got %q instead.", r)
+}
+
+// lexTableStart lexes the beginning of a table. Namely, it makes sure that
+// it starts with a character other than '.' and ']'.
+// It assumes that '[' has already been consumed.
+// It also handles the case that this is an item in an array of tables.
+// e.g., '[[name]]'.
+func lexTableStart(lx *lexer) stateFn {
+ if lx.peek() == arrayTableStart {
+ lx.next()
+ lx.emit(itemArrayTableStart)
+ lx.push(lexArrayTableEnd)
+ } else {
+ lx.emit(itemTableStart)
+ lx.push(lexTableEnd)
+ }
+ return lexTableNameStart
+}
+
+func lexTableEnd(lx *lexer) stateFn {
+ lx.emit(itemTableEnd)
+ return lexTopEnd
+}
+
+func lexArrayTableEnd(lx *lexer) stateFn {
+ if r := lx.next(); r != arrayTableEnd {
+ return lx.errorf("Expected end of table array name delimiter %q, "+
+ "but got %q instead.", arrayTableEnd, r)
+ }
+ lx.emit(itemArrayTableEnd)
+ return lexTopEnd
+}
+
+func lexTableNameStart(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
+ switch r := lx.peek(); {
+ case r == tableEnd || r == eof:
+ return lx.errorf("Unexpected end of table name. (Table names cannot " +
+ "be empty.)")
+ case r == tableSep:
+ return lx.errorf("Unexpected table separator. (Table names cannot " +
+ "be empty.)")
+ case r == stringStart || r == rawStringStart:
+ lx.ignore()
+ lx.push(lexTableNameEnd)
+ return lexValue // reuse string lexing
+ default:
+ return lexBareTableName
+ }
+}
+
+// lexBareTableName lexes the name of a table. It assumes that at least one
+// valid character for the table has already been read.
+func lexBareTableName(lx *lexer) stateFn {
+ r := lx.next()
+ if isBareKeyChar(r) {
+ return lexBareTableName
+ }
+ lx.backup()
+ lx.emit(itemText)
+ return lexTableNameEnd
+}
+
+// lexTableNameEnd reads the end of a piece of a table name, optionally
+// consuming whitespace.
+func lexTableNameEnd(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
+ switch r := lx.next(); {
+ case isWhitespace(r):
+ return lexTableNameEnd
+ case r == tableSep:
+ lx.ignore()
+ return lexTableNameStart
+ case r == tableEnd:
+ return lx.pop()
+ default:
+ return lx.errorf("Expected '.' or ']' to end table name, but got %q "+
+ "instead.", r)
+ }
+}
+
+// lexKeyStart skips leading whitespace and then begins lexing a key name,
+// dispatching to quoted or bare key lexing as appropriate.
+func lexKeyStart(lx *lexer) stateFn {
+ r := lx.peek()
+ switch {
+ case r == keySep:
+ return lx.errorf("Unexpected key separator %q.", keySep)
+ case isWhitespace(r) || isNL(r):
+ lx.next()
+ return lexSkip(lx, lexKeyStart)
+ case r == stringStart || r == rawStringStart:
+ lx.ignore()
+ lx.emit(itemKeyStart)
+ lx.push(lexKeyEnd)
+ return lexValue // reuse string lexing
+ default:
+ lx.ignore()
+ lx.emit(itemKeyStart)
+ return lexBareKey
+ }
+}
+
+// lexBareKey consumes the text of a bare key. Assumes that the first character
+// (which is not whitespace) has not yet been consumed.
+func lexBareKey(lx *lexer) stateFn {
+ switch r := lx.next(); {
+ case isBareKeyChar(r):
+ return lexBareKey
+ case isWhitespace(r):
+ lx.backup()
+ lx.emit(itemText)
+ return lexKeyEnd
+ case r == keySep:
+ lx.backup()
+ lx.emit(itemText)
+ return lexKeyEnd
+ default:
+ return lx.errorf("Bare keys cannot contain %q.", r)
+ }
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator).
+func lexKeyEnd(lx *lexer) stateFn {
+ switch r := lx.next(); {
+ case r == keySep:
+ return lexSkip(lx, lexValue)
+ case isWhitespace(r):
+ return lexSkip(lx, lexKeyEnd)
+ default:
+ return lx.errorf("Expected key separator %q, but got %q instead.",
+ keySep, r)
+ }
+}
+
+// lexValue starts the consumption of a value anywhere a value is expected.
+// lexValue will ignore whitespace.
+// After a value is lexed, the last state on the stack is popped and returned.
+func lexValue(lx *lexer) stateFn {
+ // We allow whitespace to precede a value, but NOT new lines.
+ // In array syntax, the array states are responsible for ignoring new
+ // lines.
+ r := lx.next()
+ switch {
+ case isWhitespace(r):
+ return lexSkip(lx, lexValue)
+ case isDigit(r):
+ lx.backup() // avoid an extra state and use the same as above
+ return lexNumberOrDateStart
+ }
+ switch r {
+ case arrayStart:
+ lx.ignore()
+ lx.emit(itemArray)
+ return lexArrayValue
+ case stringStart:
+ if lx.accept(stringStart) {
+ if lx.accept(stringStart) {
+ lx.ignore() // Ignore """
+ return lexMultilineString
+ }
+ lx.backup()
+ }
+ lx.ignore() // ignore the '"'
+ return lexString
+ case rawStringStart:
+ if lx.accept(rawStringStart) {
+ if lx.accept(rawStringStart) {
+ lx.ignore() // Ignore '''
+ return lexMultilineRawString
+ }
+ lx.backup()
+ }
+ lx.ignore() // ignore the "'"
+ return lexRawString
+ case '+', '-':
+ return lexNumberStart
+ case '.': // special error case, be kind to users
+ return lx.errorf("Floats must start with a digit, not '.'.")
+ }
+ if unicode.IsLetter(r) {
+ // Be permissive here; lexBool will give a nice error if the
+ // user wrote something like
+ // x = foo
+ // (i.e. not 'true' or 'false' but is something else word-like.)
+ lx.backup()
+ return lexBool
+ }
+ return lx.errorf("Expected value but found %q instead.", r)
+}
+
+// lexArrayValue consumes one value in an array. It assumes that '[' or ','
+// have already been consumed. All whitespace and new lines are ignored.
+func lexArrayValue(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r) || isNL(r):
+ return lexSkip(lx, lexArrayValue)
+ case r == commentStart:
+ lx.push(lexArrayValue)
+ return lexCommentStart
+ case r == arrayValTerm:
+ return lx.errorf("Unexpected array value terminator %q.",
+ arrayValTerm)
+ case r == arrayEnd:
+ return lexArrayEnd
+ }
+
+ lx.backup()
+ lx.push(lexArrayValueEnd)
+ return lexValue
+}
+
+// lexArrayValueEnd consumes the cruft between values of an array. Namely,
+// it ignores whitespace and expects either a ',' or a ']'.
+func lexArrayValueEnd(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r) || isNL(r):
+ return lexSkip(lx, lexArrayValueEnd)
+ case r == commentStart:
+ lx.push(lexArrayValueEnd)
+ return lexCommentStart
+ case r == arrayValTerm:
+ lx.ignore()
+ return lexArrayValue // move on to the next value
+ case r == arrayEnd:
+ return lexArrayEnd
+ }
+ return lx.errorf("Expected an array value terminator %q or an array "+
+ "terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r)
+}
+
+// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has
+// just been consumed.
+func lexArrayEnd(lx *lexer) stateFn {
+ lx.ignore()
+ lx.emit(itemArrayEnd)
+ return lx.pop()
+}
+
+// lexString consumes the inner contents of a string. It assumes that the
+// beginning '"' has already been consumed and ignored.
+func lexString(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isNL(r):
+ return lx.errorf("Strings cannot contain new lines.")
+ case r == '\\':
+ lx.push(lexString)
+ return lexStringEscape
+ case r == stringEnd:
+ lx.backup()
+ lx.emit(itemString)
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ return lexString
+}
+
+// lexMultilineString consumes the inner contents of a multi-line string. It
+// assumes that the beginning '"""' has already been consumed and ignored.
+func lexMultilineString(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == '\\':
+ return lexMultilineStringEscape
+ case r == stringEnd:
+ if lx.accept(stringEnd) {
+ if lx.accept(stringEnd) {
+ lx.backup()
+ lx.backup()
+ lx.backup()
+ lx.emit(itemMultilineString)
+ lx.next()
+ lx.next()
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ lx.backup()
+ }
+ }
+ return lexMultilineString
+}
+
+// lexRawString consumes a raw string. Nothing can be escaped in such a string.
+// It assumes that the beginning "'" has already been consumed and ignored.
+func lexRawString(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isNL(r):
+ return lx.errorf("Strings cannot contain new lines.")
+ case r == rawStringEnd:
+ lx.backup()
+ lx.emit(itemRawString)
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ return lexRawString
+}
+
+// lexMultilineRawString consumes a multi-line raw string. Nothing can be
+// escaped in such a string. It assumes that the beginning "'''" has already
+// been consumed and ignored.
+func lexMultilineRawString(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == rawStringEnd:
+ if lx.accept(rawStringEnd) {
+ if lx.accept(rawStringEnd) {
+ lx.backup()
+ lx.backup()
+ lx.backup()
+ lx.emit(itemRawMultilineString)
+ lx.next()
+ lx.next()
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ lx.backup()
+ }
+ }
+ return lexMultilineRawString
+}
+
+// lexMultilineStringEscape consumes an escaped character. It assumes that the
+// preceding '\\' has already been consumed.
+func lexMultilineStringEscape(lx *lexer) stateFn {
+ // Handle the special case first:
+ if isNL(lx.next()) {
+ return lexMultilineString
+ }
+ lx.backup()
+ lx.push(lexMultilineString)
+ return lexStringEscape(lx)
+}
+
+func lexStringEscape(lx *lexer) stateFn {
+ r := lx.next()
+ switch r {
+ case 'b', 't', 'n', 'f', 'r', '"', '\\':
+ return lx.pop()
+ case 'u':
+ return lexShortUnicodeEscape
+ case 'U':
+ return lexLongUnicodeEscape
+ }
+ return lx.errorf("Invalid escape character %q. Only the following "+
+ "escape characters are allowed: "+
+ "\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+
+ "\\uXXXX and \\UXXXXXXXX.", r)
+}
+
+func lexShortUnicodeEscape(lx *lexer) stateFn {
+ var r rune
+ for i := 0; i < 4; i++ {
+ r = lx.next()
+ if !isHexadecimal(r) {
+ return lx.errorf("Expected four hexadecimal digits after '\\u', "+
+ "but got '%s' instead.", lx.current())
+ }
+ }
+ return lx.pop()
+}
+
+func lexLongUnicodeEscape(lx *lexer) stateFn {
+ var r rune
+ for i := 0; i < 8; i++ {
+ r = lx.next()
+ if !isHexadecimal(r) {
+ return lx.errorf("Expected eight hexadecimal digits after '\\U', "+
+ "but got '%s' instead.", lx.current())
+ }
+ }
+ return lx.pop()
+}
+
+// lexNumberOrDateStart consumes either an integer, a float, or a datetime.
+func lexNumberOrDateStart(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexNumberOrDate
+ }
+ switch r {
+ case '_':
+ return lexNumber
+ case 'e', 'E':
+ return lexFloat
+ case '.':
+ return lx.errorf("Floats must start with a digit, not '.'.")
+ }
+ return lx.errorf("Expected a digit but got %q.", r)
+}
+
+// lexNumberOrDate consumes either an integer, float or datetime.
+func lexNumberOrDate(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexNumberOrDate
+ }
+ switch r {
+ case '-':
+ return lexDatetime
+ case '_':
+ return lexNumber
+ case '.', 'e', 'E':
+ return lexFloat
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexDatetime consumes a Datetime, to a first approximation.
+// The parser validates that it matches one of the accepted formats.
+func lexDatetime(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexDatetime
+ }
+ switch r {
+ case '-', 'T', ':', '.', 'Z':
+ return lexDatetime
+ }
+
+ lx.backup()
+ lx.emit(itemDatetime)
+ return lx.pop()
+}
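
Note how permissive lexDatetime is: any run of digits mixed with '-', 'T', ':', '.', and 'Z' is emitted as itemDatetime, and real validation happens in parse.go (added later in this change) by trying concrete layouts. A standalone sketch of that second stage, mirroring the parser's layout list:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // The same layouts parse.go tries, in order.
        layouts := []string{
            "2006-01-02T15:04:05Z07:00",
            "2006-01-02T15:04:05",
            "2006-01-02",
        }
        for _, in := range []string{"1979-05-27T07:32:00Z", "1979-05-27", "1979-13-99"} {
            var t time.Time
            var err error
            for _, layout := range layouts {
                if t, err = time.ParseInLocation(layout, in, time.Local); err == nil {
                    break
                }
            }
            fmt.Println(in, "->", t, err) // the last input fails every layout
        }
    }
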
+
+// lexNumberStart consumes either an integer or a float. It assumes that a sign
+// has already been read, but that *no* digits have been consumed.
+// lexNumberStart will move to the appropriate integer or float states.
+func lexNumberStart(lx *lexer) stateFn {
+ // We MUST see a digit. Even floats have to start with a digit.
+ r := lx.next()
+ if !isDigit(r) {
+ if r == '.' {
+ return lx.errorf("Floats must start with a digit, not '.'.")
+ }
+ return lx.errorf("Expected a digit but got %q.", r)
+ }
+ return lexNumber
+}
+
+// lexNumber consumes an integer or a float after seeing the first digit.
+func lexNumber(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexNumber
+ }
+ switch r {
+ case '_':
+ return lexNumber
+ case '.', 'e', 'E':
+ return lexFloat
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexFloat consumes the elements of a float. It allows any sequence of
+// float-like characters, so floats emitted by the lexer are only a first
+// approximation and must be validated by the parser.
+func lexFloat(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexFloat
+ }
+ switch r {
+ case '_', '.', '-', '+', 'e', 'E':
+ return lexFloat
+ }
+
+ lx.backup()
+ lx.emit(itemFloat)
+ return lx.pop()
+}
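
The same lex-loosely, validate-strictly split applies to floats: lexFloat will happily emit '1.2.3' as itemFloat and leave rejection to the parser. A minimal standalone sketch of the parser-side check, using the same underscore stripping and strconv call as the itemFloat case in parse.go below:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func main() {
        for _, lexed := range []string{"6.626e-34", "1_000.5", "1.2.3"} {
            // The parser strips underscores first, then lets strconv decide.
            clean := strings.Replace(lexed, "_", "", -1)
            f, err := strconv.ParseFloat(clean, 64)
            fmt.Println(lexed, "->", f, err) // "1.2.3" is rejected here
        }
    }
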
+
+// lexBool consumes a bool string: 'true' or 'false'.
+func lexBool(lx *lexer) stateFn {
+ var rs []rune
+ for {
+ r := lx.next()
+ if r == eof || isWhitespace(r) || isNL(r) {
+ lx.backup()
+ break
+ }
+ rs = append(rs, r)
+ }
+ s := string(rs)
+ switch s {
+ case "true", "false":
+ lx.emit(itemBool)
+ return lx.pop()
+ }
+ return lx.errorf("Expected value but found %q instead.", s)
+}
+
+// lexCommentStart begins the lexing of a comment. It will emit
+// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn {
+ lx.ignore()
+ lx.emit(itemCommentStart)
+ return lexComment
+}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first new line character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+ r := lx.peek()
+ if isNL(r) || r == eof {
+ lx.emit(itemText)
+ return lx.pop()
+ }
+ lx.next()
+ return lexComment
+}
+
+// lexSkip ignores all slurped input and moves on to the next state.
+func lexSkip(lx *lexer, nextState stateFn) stateFn {
+ return func(lx *lexer) stateFn {
+ lx.ignore()
+ return nextState
+ }
+}
+
+// isWhitespace returns true if `r` is a whitespace character according
+// to the spec.
+func isWhitespace(r rune) bool {
+ return r == '\t' || r == ' '
+}
+
+func isNL(r rune) bool {
+ return r == '\n' || r == '\r'
+}
+
+func isDigit(r rune) bool {
+ return r >= '0' && r <= '9'
+}
+
+func isHexadecimal(r rune) bool {
+ return (r >= '0' && r <= '9') ||
+ (r >= 'a' && r <= 'f') ||
+ (r >= 'A' && r <= 'F')
+}
+
+func isBareKeyChar(r rune) bool {
+ return (r >= 'A' && r <= 'Z') ||
+ (r >= 'a' && r <= 'z') ||
+ (r >= '0' && r <= '9') ||
+ r == '_' ||
+ r == '-'
+}
+
+func (itype itemType) String() string {
+ switch itype {
+ case itemError:
+ return "Error"
+ case itemNIL:
+ return "NIL"
+ case itemEOF:
+ return "EOF"
+ case itemText:
+ return "Text"
+ case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
+ return "String"
+ case itemBool:
+ return "Bool"
+ case itemInteger:
+ return "Integer"
+ case itemFloat:
+ return "Float"
+ case itemDatetime:
+ return "DateTime"
+ case itemTableStart:
+ return "TableStart"
+ case itemTableEnd:
+ return "TableEnd"
+ case itemKeyStart:
+ return "KeyStart"
+ case itemArray:
+ return "Array"
+ case itemArrayEnd:
+ return "ArrayEnd"
+ case itemCommentStart:
+ return "CommentStart"
+ }
+ panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
+}
+
+func (item item) String() string {
+ return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
+}
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
new file mode 100644
index 00000000..a5625555
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -0,0 +1,557 @@
+package toml
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+type parser struct {
+ mapping map[string]interface{}
+ types map[string]tomlType
+ lx *lexer
+
+ // A list of keys in the order that they appear in the TOML data.
+ ordered []Key
+
+ // the full key for the current hash in scope
+ context Key
+
+ // the base key name for everything except hashes
+ currentKey string
+
+ // rough approximation of line number
+ approxLine int
+
+ // A map of 'key.group.names' to whether they were created implicitly.
+ implicits map[string]bool
+}
+
+type parseError string
+
+func (pe parseError) Error() string {
+ return string(pe)
+}
+
+func parse(data string) (p *parser, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ var ok bool
+ if err, ok = r.(parseError); ok {
+ return
+ }
+ panic(r)
+ }
+ }()
+
+ p = &parser{
+ mapping: make(map[string]interface{}),
+ types: make(map[string]tomlType),
+ lx: lex(data),
+ ordered: make([]Key, 0),
+ implicits: make(map[string]bool),
+ }
+ for {
+ item := p.next()
+ if item.typ == itemEOF {
+ break
+ }
+ p.topLevel(item)
+ }
+
+ return p, nil
+}
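
parse relies on a common Go idiom: internal helpers panic with the typed parseError, and the exported entry point recovers only that type into an ordinary error while re-panicking on anything else, which would be a genuine bug. A self-contained sketch of the idiom, with hypothetical names:

    package main

    import "fmt"

    type parseError string

    func (pe parseError) Error() string { return string(pe) }

    func mustPositive(n int) int {
        if n <= 0 {
            panic(parseError(fmt.Sprintf("expected positive value, got %d", n)))
        }
        return n
    }

    func parsePositive(n int) (v int, err error) {
        defer func() {
            if r := recover(); r != nil {
                if pe, ok := r.(parseError); ok {
                    err = pe // expected failure: surface as an error
                    return
                }
                panic(r) // anything else is a real bug
            }
        }()
        return mustPositive(n), nil
    }

    func main() {
        fmt.Println(parsePositive(3))  // 3 <nil>
        fmt.Println(parsePositive(-1)) // 0 expected positive value, got -1
    }
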
+
+func (p *parser) panicf(format string, v ...interface{}) {
+ msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
+ p.approxLine, p.current(), fmt.Sprintf(format, v...))
+ panic(parseError(msg))
+}
+
+func (p *parser) next() item {
+ it := p.lx.nextItem()
+ if it.typ == itemError {
+ p.panicf("%s", it.val)
+ }
+ return it
+}
+
+func (p *parser) bug(format string, v ...interface{}) {
+ panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
+}
+
+func (p *parser) expect(typ itemType) item {
+ it := p.next()
+ p.assertEqual(typ, it.typ)
+ return it
+}
+
+func (p *parser) assertEqual(expected, got itemType) {
+ if expected != got {
+ p.bug("Expected '%s' but got '%s'.", expected, got)
+ }
+}
+
+func (p *parser) topLevel(item item) {
+ switch item.typ {
+ case itemCommentStart:
+ p.approxLine = item.line
+ p.expect(itemText)
+ case itemTableStart:
+ kg := p.next()
+ p.approxLine = kg.line
+
+ var key Key
+ for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
+ key = append(key, p.keyString(kg))
+ }
+ p.assertEqual(itemTableEnd, kg.typ)
+
+ p.establishContext(key, false)
+ p.setType("", tomlHash)
+ p.ordered = append(p.ordered, key)
+ case itemArrayTableStart:
+ kg := p.next()
+ p.approxLine = kg.line
+
+ var key Key
+ for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
+ key = append(key, p.keyString(kg))
+ }
+ p.assertEqual(itemArrayTableEnd, kg.typ)
+
+ p.establishContext(key, true)
+ p.setType("", tomlArrayHash)
+ p.ordered = append(p.ordered, key)
+ case itemKeyStart:
+ kname := p.next()
+ p.approxLine = kname.line
+ p.currentKey = p.keyString(kname)
+
+ val, typ := p.value(p.next())
+ p.setValue(p.currentKey, val)
+ p.setType(p.currentKey, typ)
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ p.currentKey = ""
+ default:
+ p.bug("Unexpected type at top level: %s", item.typ)
+ }
+}
+
+// Gets a string for a key (or part of a key in a table name).
+func (p *parser) keyString(it item) string {
+ switch it.typ {
+ case itemText:
+ return it.val
+ case itemString, itemMultilineString,
+ itemRawString, itemRawMultilineString:
+ s, _ := p.value(it)
+ return s.(string)
+ default:
+ p.bug("Unexpected key type: %s", it.typ)
+ panic("unreachable")
+ }
+}
+
+// value translates an expected value from the lexer into a Go value wrapped
+// as an empty interface.
+func (p *parser) value(it item) (interface{}, tomlType) {
+ switch it.typ {
+ case itemString:
+ return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
+ case itemMultilineString:
+ trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
+ return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
+ case itemRawString:
+ return it.val, p.typeOfPrimitive(it)
+ case itemRawMultilineString:
+ return stripFirstNewline(it.val), p.typeOfPrimitive(it)
+ case itemBool:
+ switch it.val {
+ case "true":
+ return true, p.typeOfPrimitive(it)
+ case "false":
+ return false, p.typeOfPrimitive(it)
+ }
+ p.bug("Expected boolean value, but got '%s'.", it.val)
+ case itemInteger:
+ if !numUnderscoresOK(it.val) {
+ p.panicf("Invalid integer %q: underscores must be surrounded by digits",
+ it.val)
+ }
+ val := strings.Replace(it.val, "_", "", -1)
+ num, err := strconv.ParseInt(val, 10, 64)
+ if err != nil {
+ // Distinguish integer values. Normally, it'd be a bug if the lexer
+ // provides an invalid integer, but it's possible that the number is
+ // out of range of valid values (which the lexer cannot determine).
+ // So mark the former as a bug but the latter as a legitimate user
+ // error.
+ if e, ok := err.(*strconv.NumError); ok &&
+ e.Err == strconv.ErrRange {
+
+ p.panicf("Integer '%s' is out of the range of 64-bit "+
+ "signed integers.", it.val)
+ } else {
+ p.bug("Expected integer value, but got '%s'.", it.val)
+ }
+ }
+ return num, p.typeOfPrimitive(it)
+ case itemFloat:
+ parts := strings.FieldsFunc(it.val, func(r rune) bool {
+ switch r {
+ case '.', 'e', 'E':
+ return true
+ }
+ return false
+ })
+ for _, part := range parts {
+ if !numUnderscoresOK(part) {
+ p.panicf("Invalid float %q: underscores must be "+
+ "surrounded by digits", it.val)
+ }
+ }
+ if !numPeriodsOK(it.val) {
+ // As a special case, numbers like '123.' or '1.e2',
+ // which are valid as far as Go/strconv are concerned,
+ // must be rejected because TOML says that a fractional
+ // part consists of '.' followed by 1+ digits.
+ p.panicf("Invalid float %q: '.' must be followed "+
+ "by one or more digits", it.val)
+ }
+ val := strings.Replace(it.val, "_", "", -1)
+ num, err := strconv.ParseFloat(val, 64)
+ if err != nil {
+ if e, ok := err.(*strconv.NumError); ok &&
+ e.Err == strconv.ErrRange {
+
+ p.panicf("Float '%s' is out of the range of 64-bit "+
+ "IEEE-754 floating-point numbers.", it.val)
+ } else {
+ p.panicf("Invalid float value: %q", it.val)
+ }
+ }
+ return num, p.typeOfPrimitive(it)
+ case itemDatetime:
+ var t time.Time
+ var ok bool
+ var err error
+ for _, format := range []string{
+ "2006-01-02T15:04:05Z07:00",
+ "2006-01-02T15:04:05",
+ "2006-01-02",
+ } {
+ t, err = time.ParseInLocation(format, it.val, time.Local)
+ if err == nil {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ p.panicf("Invalid TOML Datetime: %q.", it.val)
+ }
+ return t, p.typeOfPrimitive(it)
+ case itemArray:
+ array := make([]interface{}, 0)
+ types := make([]tomlType, 0)
+
+ for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
+ if it.typ == itemCommentStart {
+ p.expect(itemText)
+ continue
+ }
+
+ val, typ := p.value(it)
+ array = append(array, val)
+ types = append(types, typ)
+ }
+ return array, p.typeOfArray(types)
+ }
+ p.bug("Unexpected value type: %s", it.typ)
+ panic("unreachable")
+}
+
+// numUnderscoresOK checks whether each underscore in s is surrounded by
+// characters that are not underscores.
+func numUnderscoresOK(s string) bool {
+ accept := false
+ for _, r := range s {
+ if r == '_' {
+ if !accept {
+ return false
+ }
+ accept = false
+ continue
+ }
+ accept = true
+ }
+ return accept
+}
+
+// numPeriodsOK checks whether every period in s is followed by a digit.
+func numPeriodsOK(s string) bool {
+ period := false
+ for _, r := range s {
+ if period && !isDigit(r) {
+ return false
+ }
+ period = r == '.'
+ }
+ return !period
+}
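
Together these two predicates encode, to a first approximation, TOML's rules that underscores must be surrounded by digits and that a '.' must be followed by a digit. A hypothetical table test (same package, using the functions as defined above) showing the expected verdicts:

    package toml

    import "testing"

    func TestNumPredicates(t *testing.T) {
        cases := []struct {
            in                   string
            underscores, periods bool
        }{
            {"1_000", true, true},
            {"_1000", false, true},
            {"10__00", false, true},
            {"3.14", true, true},
            {"3.", true, false}, // '.' must be followed by a digit
            {"1.e2", true, false},
        }
        for _, c := range cases {
            if numUnderscoresOK(c.in) != c.underscores || numPeriodsOK(c.in) != c.periods {
                t.Errorf("predicate mismatch for %q", c.in)
            }
        }
    }
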
+
+// establishContext sets the current context of the parser,
+// where the context is either a hash or an array of hashes. Which one is
+// set depends on the value of the `array` parameter.
+//
+// Establishing the context also makes sure that the key isn't a duplicate, and
+// will create implicit hashes automatically.
+func (p *parser) establishContext(key Key, array bool) {
+ var ok bool
+
+ // Always start at the top level and drill down for our context.
+ hashContext := p.mapping
+ keyContext := make(Key, 0)
+
+ // We only need implicit hashes for key[0:-1]
+ for _, k := range key[0 : len(key)-1] {
+ _, ok = hashContext[k]
+ keyContext = append(keyContext, k)
+
+ // No key? Make an implicit hash and move on.
+ if !ok {
+ p.addImplicit(keyContext)
+ hashContext[k] = make(map[string]interface{})
+ }
+
+ // If the hash context is actually an array of tables, then set
+ // the hash context to the last element in that array.
+ //
+ // Otherwise, it better be a table, since this MUST be a key group (by
+ // virtue of it not being the last element in a key).
+ switch t := hashContext[k].(type) {
+ case []map[string]interface{}:
+ hashContext = t[len(t)-1]
+ case map[string]interface{}:
+ hashContext = t
+ default:
+ p.panicf("Key '%s' was already created as a hash.", keyContext)
+ }
+ }
+
+ p.context = keyContext
+ if array {
+ // If this is the first element for this array, then allocate a new
+ // list of tables for it.
+ k := key[len(key)-1]
+ if _, ok := hashContext[k]; !ok {
+ hashContext[k] = make([]map[string]interface{}, 0, 5)
+ }
+
+ // Add a new table. But make sure the key hasn't already been used
+ // for something else.
+ if hash, ok := hashContext[k].([]map[string]interface{}); ok {
+ hashContext[k] = append(hash, make(map[string]interface{}))
+ } else {
+ p.panicf("Key '%s' was already created and cannot be used as "+
+ "an array.", keyContext)
+ }
+ } else {
+ p.setValue(key[len(key)-1], make(map[string]interface{}))
+ }
+ p.context = append(p.context, key[len(key)-1])
+}
+
+// setValue sets the given key to the given value in the current context.
+// It will make sure that the key hasn't already been defined, and it
+// accounts for implicit key groups.
+func (p *parser) setValue(key string, value interface{}) {
+ var tmpHash interface{}
+ var ok bool
+
+ hash := p.mapping
+ keyContext := make(Key, 0)
+ for _, k := range p.context {
+ keyContext = append(keyContext, k)
+ if tmpHash, ok = hash[k]; !ok {
+ p.bug("Context for key '%s' has not been established.", keyContext)
+ }
+ switch t := tmpHash.(type) {
+ case []map[string]interface{}:
+ // The context is a table of hashes. Pick the most recent table
+ // defined as the current hash.
+ hash = t[len(t)-1]
+ case map[string]interface{}:
+ hash = t
+ default:
+ p.bug("Expected hash to have type 'map[string]interface{}', but "+
+ "it has '%T' instead.", tmpHash)
+ }
+ }
+ keyContext = append(keyContext, key)
+
+ if _, ok := hash[key]; ok {
+ // Typically, if the given key has already been set, then we have
+ // to raise an error since duplicate keys are disallowed. However,
+ // it's possible that a key was previously defined implicitly. In this
+ // case, it is allowed to be redefined concretely. (See the
+ // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
+ //
+ // But we have to make sure to stop marking it as an implicit. (So that
+ // another redefinition provokes an error.)
+ //
+ // Note that since it has already been defined (as a hash), we don't
+ // want to overwrite it. So our business is done.
+ if p.isImplicit(keyContext) {
+ p.removeImplicit(keyContext)
+ return
+ }
+
+ // Otherwise, we have a concrete key trying to override a previous
+ // key, which is *always* wrong.
+ p.panicf("Key '%s' has already been defined.", keyContext)
+ }
+ hash[key] = value
+}
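
This implicit/explicit bookkeeping is exactly what the toml-test case referenced above exercises: a table created implicitly by [a.b] may later be defined explicitly as [a], while a second explicit definition panics with "already been defined". A quick sketch against the package's public Decode entry point:

    package main

    import (
        "fmt"

        "github.com/BurntSushi/toml"
    )

    func main() {
        doc := `
    [a.b]   # implicitly creates table "a"
    x = 1

    [a]     # explicit definition of "a" is still allowed
    y = 2
    `
        var v map[string]interface{}
        if _, err := toml.Decode(doc, &v); err != nil {
            fmt.Println("decode failed:", err)
            return
        }
        fmt.Printf("%#v\n", v)
    }
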
+
+// setType sets the type of a particular value at a given key.
+// It should be called immediately AFTER setValue.
+//
+// Note that if `key` is empty, then the type given will be applied to the
+// current context (which is either a table or an array of tables).
+func (p *parser) setType(key string, typ tomlType) {
+ keyContext := make(Key, 0, len(p.context)+1)
+ for _, k := range p.context {
+ keyContext = append(keyContext, k)
+ }
+ if len(key) > 0 { // allow type setting for hashes
+ keyContext = append(keyContext, key)
+ }
+ p.types[keyContext.String()] = typ
+}
+
+// addImplicit sets the given Key as having been created implicitly.
+func (p *parser) addImplicit(key Key) {
+ p.implicits[key.String()] = true
+}
+
+// removeImplicit stops tagging the given key as having been implicitly
+// created.
+func (p *parser) removeImplicit(key Key) {
+ p.implicits[key.String()] = false
+}
+
+// isImplicit returns true if the key group pointed to by the key was created
+// implicitly.
+func (p *parser) isImplicit(key Key) bool {
+ return p.implicits[key.String()]
+}
+
+// current returns the full key name of the current context.
+func (p *parser) current() string {
+ if len(p.currentKey) == 0 {
+ return p.context.String()
+ }
+ if len(p.context) == 0 {
+ return p.currentKey
+ }
+ return fmt.Sprintf("%s.%s", p.context, p.currentKey)
+}
+
+func stripFirstNewline(s string) string {
+ if len(s) == 0 || s[0] != '\n' {
+ return s
+ }
+ return s[1:]
+}
+
+func stripEscapedWhitespace(s string) string {
+ esc := strings.Split(s, "\\\n")
+ if len(esc) > 1 {
+ for i := 1; i < len(esc); i++ {
+ esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
+ }
+ }
+ return strings.Join(esc, "")
+}
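
These two helpers implement multi-line string trimming: the newline right after the opening delimiter is dropped, and a trailing backslash swallows the newline plus any leading whitespace on the next line. A hypothetical test (same package) tracing the pipeline used by the itemMultilineString case above:

    package toml

    import "testing"

    func TestMultilineTrim(t *testing.T) {
        // The lexed inner value of a """...""" string whose first line
        // ends in a line-continuation backslash.
        raw := "\nThe quick brown \\\n   fox."
        got := stripFirstNewline(stripEscapedWhitespace(raw))
        if want := "The quick brown fox."; got != want {
            t.Errorf("got %q, want %q", got, want)
        }
    }
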
+
+func (p *parser) replaceEscapes(str string) string {
+ var replaced []rune
+ s := []byte(str)
+ r := 0
+ for r < len(s) {
+ if s[r] != '\\' {
+ c, size := utf8.DecodeRune(s[r:])
+ r += size
+ replaced = append(replaced, c)
+ continue
+ }
+ r += 1
+ if r >= len(s) {
+ p.bug("Escape sequence at end of string.")
+ return ""
+ }
+ switch s[r] {
+ default:
+ p.bug("Expected valid escape code after \\, but got %q.", s[r])
+ return ""
+ case 'b':
+ replaced = append(replaced, rune(0x0008))
+ r += 1
+ case 't':
+ replaced = append(replaced, rune(0x0009))
+ r += 1
+ case 'n':
+ replaced = append(replaced, rune(0x000A))
+ r += 1
+ case 'f':
+ replaced = append(replaced, rune(0x000C))
+ r += 1
+ case 'r':
+ replaced = append(replaced, rune(0x000D))
+ r += 1
+ case '"':
+ replaced = append(replaced, rune(0x0022))
+ r += 1
+ case '\\':
+ replaced = append(replaced, rune(0x005C))
+ r += 1
+ case 'u':
+ // At this point, we know we have a Unicode escape of the form
+ // `uXXXX` at [r, r+5). (Because the lexer guarantees this
+ // for us.)
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
+ replaced = append(replaced, escaped)
+ r += 5
+ case 'U':
+ // At this point, we know we have a Unicode escape of the form
+ // `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
+ // for us.)
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
+ replaced = append(replaced, escaped)
+ r += 9
+ }
+ }
+ return string(replaced)
+}
+
+func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
+ s := string(bs)
+ hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
+ if err != nil {
+ p.bug("Could not parse '%s' as a hexadecimal number, but the "+
+ "lexer claims it's OK: %s", s, err)
+ }
+ if !utf8.ValidRune(rune(hex)) {
+ p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
+ }
+ return rune(hex)
+}
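
The conversion itself is a thin wrapper over strconv, plus a UTF-8 validity check that rejects surrogate code points. The same decoding outside this package (a standalone sketch, not the vendored code path):

    package main

    import (
        "fmt"
        "strconv"
        "unicode/utf8"
    )

    func main() {
        for _, hex := range []string{"0041", "00E9", "D800"} {
            n, err := strconv.ParseUint(hex, 16, 32)
            if err != nil {
                fmt.Println(hex, "->", err)
                continue
            }
            // D800 parses fine but is a surrogate, hence not a valid rune.
            fmt.Printf("\\u%s -> U+%04X (valid rune: %v)\n", hex, n, utf8.ValidRune(rune(n)))
        }
    }
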
+
+func isStringType(ty itemType) bool {
+ return ty == itemString || ty == itemMultilineString ||
+ ty == itemRawString || ty == itemRawMultilineString
+}
diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go
new file mode 100644
index 00000000..c73f8afc
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/type_check.go
@@ -0,0 +1,91 @@
+package toml
+
+// tomlType represents any Go type that corresponds to a TOML type.
+// While the first draft of the TOML spec has a simplistic type system that
+// probably doesn't need this level of sophistication, the spec seems to be
+// moving toward real composite types.
+type tomlType interface {
+ typeString() string
+}
+
+// typeEqual accepts any two types and returns true if they are equal.
+func typeEqual(t1, t2 tomlType) bool {
+ if t1 == nil || t2 == nil {
+ return false
+ }
+ return t1.typeString() == t2.typeString()
+}
+
+func typeIsHash(t tomlType) bool {
+ return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
+}
+
+type tomlBaseType string
+
+func (btype tomlBaseType) typeString() string {
+ return string(btype)
+}
+
+func (btype tomlBaseType) String() string {
+ return btype.typeString()
+}
+
+var (
+ tomlInteger tomlBaseType = "Integer"
+ tomlFloat tomlBaseType = "Float"
+ tomlDatetime tomlBaseType = "Datetime"
+ tomlString tomlBaseType = "String"
+ tomlBool tomlBaseType = "Bool"
+ tomlArray tomlBaseType = "Array"
+ tomlHash tomlBaseType = "Hash"
+ tomlArrayHash tomlBaseType = "ArrayHash"
+)
+
+// typeOfPrimitive returns a tomlType of any primitive value in TOML.
+// Primitive values are: Integer, Float, Datetime, String and Bool.
+//
+// Passing a lexer item other than the following will cause a BUG message
+// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
+func (p *parser) typeOfPrimitive(lexItem item) tomlType {
+ switch lexItem.typ {
+ case itemInteger:
+ return tomlInteger
+ case itemFloat:
+ return tomlFloat
+ case itemDatetime:
+ return tomlDatetime
+ case itemString, itemMultilineString, itemRawString, itemRawMultilineString:
+ return tomlString
+ case itemBool:
+ return tomlBool
+ }
+ p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
+ panic("unreachable")
+}
+
+// typeOfArray returns a tomlType for an array given a list of types of its
+// values.
+//
+// In the current spec, if an array is homogeneous, then its type is always
+// "Array". If the array is not homogeneous, an error is generated.
+func (p *parser) typeOfArray(types []tomlType) tomlType {
+ // Empty arrays are cool.
+ if len(types) == 0 {
+ return tomlArray
+ }
+
+ theType := types[0]
+ for _, t := range types[1:] {
+ if !typeEqual(theType, t) {
+ p.panicf("Array contains values of type '%s' and '%s', but "+
+ "arrays must be homogeneous.", theType, t)
+ }
+ }
+ return tomlArray
+}
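
Because this vendored copy targets the pre-1.0 TOML spec, the homogeneity rule is observable from the public API: decoding a mixed array fails with the panicf message above, surfaced as an ordinary error by parse's recover. A quick sketch:

    package main

    import (
        "fmt"

        "github.com/BurntSushi/toml"
    )

    func main() {
        var v struct{ Xs []interface{} }
        _, err := toml.Decode(`xs = [1, "two"]`, &v)
        fmt.Println(err) // expected: arrays must be homogeneous
    }
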
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
new file mode 100644
index 00000000..608997c2
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/type_fields.go
@@ -0,0 +1,242 @@
+package toml
+
+// Struct field handling is adapted from code in encoding/json:
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the Go distribution.
+
+import (
+ "reflect"
+ "sort"
+ "sync"
+)
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string // the name of the field (`toml` tag included)
+ tag bool // whether field has a `toml` tag
+ index []int // represents the depth of an anonymous field
+ typ reflect.Type // the type of the field
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from toml tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that TOML should recognize for the given
+// type. The algorithm is breadth-first search over the set of structs to
+// include - the top struct and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" && !sf.Anonymous { // unexported
+ continue
+ }
+ opts := getOptions(sf.Tag)
+ if opts.skip {
+ continue
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Record found field and index sequence.
+ if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := opts.name != ""
+ name := opts.name
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, field{name, tagged, index, ft})
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ f := field{name: ft.Name(), index: index, typ: ft}
+ next = append(next, f)
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with TOML tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// TOML tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
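
These rules mirror encoding/json, from which this file is adapted: a shallower field hides deeper ones, a `toml` tag beats an untagged field at the same depth, and ambiguous ties drop the field entirely. A hedged sketch of the observable behavior when decoding into embedded structs:

    package main

    import (
        "fmt"

        "github.com/BurntSushi/toml"
    )

    type Base struct {
        Name string // depth 1: hidden by the shallower field below
    }

    type Config struct {
        Base
        Name string `toml:"name"` // depth 0 and tagged: dominant
    }

    func main() {
        var c Config
        if _, err := toml.Decode(`name = "top"`, &c); err != nil {
            fmt.Println(err)
            return
        }
        fmt.Printf("outer=%q embedded=%q\n", c.Name, c.Base.Name)
        // expected: outer="top" embedded=""
    }
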
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
diff --git a/vendor/gopkg.in/gcfg.v1/LICENSE b/vendor/gopkg.in/gcfg.v1/LICENSE
deleted file mode 100644
index 87a5cede..00000000
--- a/vendor/gopkg.in/gcfg.v1/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go
-Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/gcfg.v1/doc.go b/vendor/gopkg.in/gcfg.v1/doc.go
deleted file mode 100644
index 0e198e36..00000000
--- a/vendor/gopkg.in/gcfg.v1/doc.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Package gcfg reads "INI-style" text-based configuration files with
-// "name=value" pairs grouped into sections (gcfg files).
-//
-// This package is still a work in progress; see the sections below for planned
-// changes.
-//
-// Syntax
-//
-// The syntax is based on that used by git config:
-// http://git-scm.com/docs/git-config#_syntax .
-// There are some (planned) differences compared to the git config format:
-// - improve data portability:
-// - must be encoded in UTF-8 (for now) and must not contain the 0 byte
-// - include and "path" type is not supported
-// (path type may be implementable as a user-defined type)
-// - internationalization
-// - section and variable names can contain unicode letters, unicode digits
-// (as defined in http://golang.org/ref/spec#Characters ) and hyphens
-// (U+002D), starting with a unicode letter
-// - disallow potentially ambiguous or misleading definitions:
-// - `[sec.sub]` format is not allowed (deprecated in gitconfig)
-// - `[sec ""]` is not allowed
-// - use `[sec]` for section name "sec" and empty subsection name
-// - (planned) within a single file, definitions must be contiguous for each:
-// - section: '[secA]' -> '[secB]' -> '[secA]' is an error
-// - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error
-// - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error
-//
-// Data structure
-//
-// The functions in this package read values into a user-defined struct.
-// Each section corresponds to a struct field in the config struct, and each
-// variable in a section corresponds to a data field in the section struct.
-// The mapping of each section or variable name to fields is done either based
-// on the "gcfg" struct tag or by matching the name of the section or variable,
-// ignoring case. In the latter case, hyphens '-' in section and variable names
-// correspond to underscores '_' in field names.
-// Fields must be exported; to use a section or variable name starting with a
-// letter that is neither upper- or lower-case, prefix the field name with 'X'.
-// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .)
-//
-// For sections with subsections, the corresponding field in config must be a
-// map, rather than a struct, with string keys and pointer-to-struct values.
-// Values for subsection variables are stored in the map with the subsection
-// name used as the map key.
-// (Note that unlike section and variable names, subsection names are case
-// sensitive.)
-// When using a map, and there is a section with the same section name but
-// without a subsection name, its values are stored with the empty string used
-// as the key.
-//
-// The functions in this package panic if config is not a pointer to a struct,
-// or when a field is not of a suitable type (either a struct or a map with
-// string keys and pointer-to-struct values).
-//
-// Parsing of values
-//
-// The section structs in the config struct may contain single-valued or
-// multi-valued variables. Variables of unnamed slice type (that is, a type
-// starting with `[]`) are treated as multi-value; all others (including named
-// slice types) are treated as single-valued variables.
-//
-// Single-valued variables are handled based on the type as follows.
-// Unnamed pointer types (that is, types starting with `*`) are dereferenced,
-// and if necessary, a new instance is allocated.
-//
-// For types implementing the encoding.TextUnmarshaler interface, the
-// UnmarshalText method is used to set the value. Implementing this method is
-// the recommended way for parsing user-defined types.
-//
-// For fields of string kind, the value string is assigned to the field, after
-// unquoting and unescaping as needed.
-// For fields of bool kind, the field is set to true if the value is "true",
-// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or
-// "0", ignoring case. In addition, single-valued bool fields can be specified
-// with a "blank" value (variable name without equals sign and value); in such
-// case the value is set to true.
-//
-// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as
-// decimal or hexadecimal (if having '0x' prefix). (This is to prevent
-// unintuitively handling zero-padded numbers as octal.) Other types having
-// [u]int* as the underlying type, such as os.FileMode and uintptr allow
-// decimal, hexadecimal, or octal values.
-// Parsing mode for integer types can be overridden using the struct tag option
-// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters
-// (each standing for decimal, hexadecimal, and octal, respectively.)
-//
-// All other types are parsed using fmt.Sscanf with the "%v" verb.
-//
-// For multi-valued variables, each individual value is parsed as above and
-// appended to the slice. If the first value is specified as a "blank" value
-// (variable name without equals sign and value), a new slice is allocated;
-// that is any values previously set in the slice will be ignored.
-//
-// The types subpackage for provides helpers for parsing "enum-like" and integer
-// types.
-//
-// TODO
-//
-// The following is a list of changes under consideration:
-// - documentation
-// - self-contained syntax documentation
-// - more practical examples
-// - move TODOs to issue tracker (eventually)
-// - syntax
-// - reconsider valid escape sequences
-// (gitconfig doesn't support \r in value, \t in subsection name, etc.)
-// - reading / parsing gcfg files
-// - define internal representation structure
-// - support multiple inputs (readers, strings, files)
-// - support declaring encoding (?)
-// - support varying fields sets for subsections (?)
-// - writing gcfg files
-// - error handling
-// - make error context accessible programmatically?
-// - limit input size?
-//
-package gcfg // import "gopkg.in/gcfg.v1"
diff --git a/vendor/gopkg.in/gcfg.v1/go1_0.go b/vendor/gopkg.in/gcfg.v1/go1_0.go
deleted file mode 100644
index 66702107..00000000
--- a/vendor/gopkg.in/gcfg.v1/go1_0.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build !go1.2
-
-package gcfg
-
-type textUnmarshaler interface {
- UnmarshalText(text []byte) error
-}
diff --git a/vendor/gopkg.in/gcfg.v1/go1_2.go b/vendor/gopkg.in/gcfg.v1/go1_2.go
deleted file mode 100644
index 6f5843bc..00000000
--- a/vendor/gopkg.in/gcfg.v1/go1_2.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build go1.2
-
-package gcfg
-
-import (
- "encoding"
-)
-
-type textUnmarshaler encoding.TextUnmarshaler
diff --git a/vendor/gopkg.in/gcfg.v1/read.go b/vendor/gopkg.in/gcfg.v1/read.go
deleted file mode 100644
index fdfb5f3a..00000000
--- a/vendor/gopkg.in/gcfg.v1/read.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package gcfg
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "strings"
-)
-
-import (
- "gopkg.in/gcfg.v1/scanner"
- "gopkg.in/gcfg.v1/token"
-)
-
-var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t'}
-
-// no error: invalid literals should be caught by scanner
-func unquote(s string) string {
- u, q, esc := make([]rune, 0, len(s)), false, false
- for _, c := range s {
- if esc {
- uc, ok := unescape[c]
- switch {
- case ok:
- u = append(u, uc)
- fallthrough
- case !q && c == '\n':
- esc = false
- continue
- }
- panic("invalid escape sequence")
- }
- switch c {
- case '"':
- q = !q
- case '\\':
- esc = true
- default:
- u = append(u, c)
- }
- }
- if q {
- panic("missing end quote")
- }
- if esc {
- panic("invalid escape sequence")
- }
- return string(u)
-}
-
-func readInto(config interface{}, fset *token.FileSet, file *token.File, src []byte) error {
- var s scanner.Scanner
- var errs scanner.ErrorList
- s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0)
- sect, sectsub := "", ""
- pos, tok, lit := s.Scan()
- errfn := func(msg string) error {
- return fmt.Errorf("%s: %s", fset.Position(pos), msg)
- }
- for {
- if errs.Len() > 0 {
- return errs.Err()
- }
- switch tok {
- case token.EOF:
- return nil
- case token.EOL, token.COMMENT:
- pos, tok, lit = s.Scan()
- case token.LBRACK:
- pos, tok, lit = s.Scan()
- if errs.Len() > 0 {
- return errs.Err()
- }
- if tok != token.IDENT {
- return errfn("expected section name")
- }
- sect, sectsub = lit, ""
- pos, tok, lit = s.Scan()
- if errs.Len() > 0 {
- return errs.Err()
- }
- if tok == token.STRING {
- sectsub = unquote(lit)
- if sectsub == "" {
- return errfn("empty subsection name")
- }
- pos, tok, lit = s.Scan()
- if errs.Len() > 0 {
- return errs.Err()
- }
- }
- if tok != token.RBRACK {
- if sectsub == "" {
- return errfn("expected subsection name or right bracket")
- }
- return errfn("expected right bracket")
- }
- pos, tok, lit = s.Scan()
- if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
- return errfn("expected EOL, EOF, or comment")
- }
- // If a section/subsection header was found, ensure a
- // container object is created, even if there are no
- // variables further down.
- err := set(config, sect, sectsub, "", true, "")
- if err != nil {
- return err
- }
- case token.IDENT:
- if sect == "" {
- return errfn("expected section header")
- }
- n := lit
- pos, tok, lit = s.Scan()
- if errs.Len() > 0 {
- return errs.Err()
- }
- blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, ""
- if !blank {
- if tok != token.ASSIGN {
- return errfn("expected '='")
- }
- pos, tok, lit = s.Scan()
- if errs.Len() > 0 {
- return errs.Err()
- }
- if tok != token.STRING {
- return errfn("expected value")
- }
- v = unquote(lit)
- pos, tok, lit = s.Scan()
- if errs.Len() > 0 {
- return errs.Err()
- }
- if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
- return errfn("expected EOL, EOF, or comment")
- }
- }
- err := set(config, sect, sectsub, n, blank, v)
- if err != nil {
- return err
- }
- default:
- if sect == "" {
- return errfn("expected section header")
- }
- return errfn("expected section header or variable declaration")
- }
- }
- panic("never reached")
-}
-
-// ReadInto reads gcfg formatted data from reader and sets the values into the
-// corresponding fields in config.
-func ReadInto(config interface{}, reader io.Reader) error {
- src, err := ioutil.ReadAll(reader)
- if err != nil {
- return err
- }
- fset := token.NewFileSet()
- file := fset.AddFile("", fset.Base(), len(src))
- return readInto(config, fset, file, src)
-}
-
-// ReadStringInto reads gcfg formatted data from str and sets the values into
-// the corresponding fields in config.
-func ReadStringInto(config interface{}, str string) error {
- r := strings.NewReader(str)
- return ReadInto(config, r)
-}
-
-// ReadFileInto reads gcfg formatted data from the file filename and sets the
-// values into the corresponding fields in config.
-func ReadFileInto(config interface{}, filename string) error {
- f, err := os.Open(filename)
- if err != nil {
- return err
- }
- defer f.Close()
- src, err := ioutil.ReadAll(f)
- if err != nil {
- return err
- }
- fset := token.NewFileSet()
- file := fset.AddFile(filename, fset.Base(), len(src))
- return readInto(config, fset, file, src)
-}
diff --git a/vendor/gopkg.in/gcfg.v1/scanner/errors.go b/vendor/gopkg.in/gcfg.v1/scanner/errors.go
deleted file mode 100644
index 1a3c0f65..00000000
--- a/vendor/gopkg.in/gcfg.v1/scanner/errors.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package scanner
-
-import (
- "fmt"
- "io"
- "sort"
-)
-
-import (
- "gopkg.in/gcfg.v1/token"
-)
-
-// In an ErrorList, an error is represented by an *Error.
-// The position Pos, if valid, points to the beginning of
-// the offending token, and the error condition is described
-// by Msg.
-//
-type Error struct {
- Pos token.Position
- Msg string
-}
-
-// Error implements the error interface.
-func (e Error) Error() string {
- if e.Pos.Filename != "" || e.Pos.IsValid() {
- // don't print "<unknown position>"
- // TODO(gri) reconsider the semantics of Position.IsValid
- return e.Pos.String() + ": " + e.Msg
- }
- return e.Msg
-}
-
-// ErrorList is a list of *Errors.
-// The zero value for an ErrorList is an empty ErrorList ready to use.
-//
-type ErrorList []*Error
-
-// Add adds an Error with given position and error message to an ErrorList.
-func (p *ErrorList) Add(pos token.Position, msg string) {
- *p = append(*p, &Error{pos, msg})
-}
-
-// Reset resets an ErrorList to no errors.
-func (p *ErrorList) Reset() { *p = (*p)[0:0] }
-
-// ErrorList implements the sort Interface.
-func (p ErrorList) Len() int { return len(p) }
-func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-
-func (p ErrorList) Less(i, j int) bool {
- e := &p[i].Pos
- f := &p[j].Pos
- if e.Filename < f.Filename {
- return true
- }
- if e.Filename == f.Filename {
- return e.Offset < f.Offset
- }
- return false
-}
-
-// Sort sorts an ErrorList. *Error entries are sorted by position,
-// other errors are sorted by error message, and before any *Error
-// entry.
-//
-func (p ErrorList) Sort() {
- sort.Sort(p)
-}
-
-// RemoveMultiples sorts an ErrorList and removes all but the first error per line.
-func (p *ErrorList) RemoveMultiples() {
- sort.Sort(p)
- var last token.Position // initial last.Line is != any legal error line
- i := 0
- for _, e := range *p {
- if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line {
- last = e.Pos
- (*p)[i] = e
- i++
- }
- }
- (*p) = (*p)[0:i]
-}
-
-// An ErrorList implements the error interface.
-func (p ErrorList) Error() string {
- switch len(p) {
- case 0:
- return "no errors"
- case 1:
- return p[0].Error()
- }
- return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1)
-}
-
-// Err returns an error equivalent to this error list.
-// If the list is empty, Err returns nil.
-func (p ErrorList) Err() error {
- if len(p) == 0 {
- return nil
- }
- return p
-}
-
-// PrintError is a utility function that prints a list of errors to w,
-// one error per line, if the err parameter is an ErrorList. Otherwise
-// it prints the err string.
-//
-func PrintError(w io.Writer, err error) {
- if list, ok := err.(ErrorList); ok {
- for _, e := range list {
- fmt.Fprintf(w, "%s\n", e)
- }
- } else if err != nil {
- fmt.Fprintf(w, "%s\n", err)
- }
-}
diff --git a/vendor/gopkg.in/gcfg.v1/scanner/scanner.go b/vendor/gopkg.in/gcfg.v1/scanner/scanner.go
deleted file mode 100644
index bbbdbf53..00000000
--- a/vendor/gopkg.in/gcfg.v1/scanner/scanner.go
+++ /dev/null
@@ -1,342 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package scanner implements a scanner for gcfg configuration text.
-// It takes a []byte as source which can then be tokenized
-// through repeated calls to the Scan method.
-//
-// Note that the API for the scanner package may change to accommodate new
-// features or implementation changes in gcfg.
-//
-package scanner
-
-import (
- "fmt"
- "path/filepath"
- "unicode"
- "unicode/utf8"
-)
-
-import (
- "gopkg.in/gcfg.v1/token"
-)
-
-// An ErrorHandler may be provided to Scanner.Init. If a syntax error is
-// encountered and a handler was installed, the handler is called with a
-// position and an error message. The position points to the beginning of
-// the offending token.
-//
-type ErrorHandler func(pos token.Position, msg string)
-
-// A Scanner holds the scanner's internal state while processing
-// a given text. It can be allocated as part of another data
-// structure but must be initialized via Init before use.
-//
-type Scanner struct {
- // immutable state
- file *token.File // source file handle
- dir string // directory portion of file.Name()
- src []byte // source
- err ErrorHandler // error reporting; or nil
- mode Mode // scanning mode
-
- // scanning state
- ch rune // current character
- offset int // character offset
- rdOffset int // reading offset (position after current character)
- lineOffset int // current line offset
- nextVal bool // next token is expected to be a value
-
- // public state - ok to modify
- ErrorCount int // number of errors encountered
-}
-
-// Read the next Unicode char into s.ch.
-// s.ch < 0 means end-of-file.
-//
-func (s *Scanner) next() {
- if s.rdOffset < len(s.src) {
- s.offset = s.rdOffset
- if s.ch == '\n' {
- s.lineOffset = s.offset
- s.file.AddLine(s.offset)
- }
- r, w := rune(s.src[s.rdOffset]), 1
- switch {
- case r == 0:
- s.error(s.offset, "illegal character NUL")
- case r >= 0x80:
- // not ASCII
- r, w = utf8.DecodeRune(s.src[s.rdOffset:])
- if r == utf8.RuneError && w == 1 {
- s.error(s.offset, "illegal UTF-8 encoding")
- }
- }
- s.rdOffset += w
- s.ch = r
- } else {
- s.offset = len(s.src)
- if s.ch == '\n' {
- s.lineOffset = s.offset
- s.file.AddLine(s.offset)
- }
- s.ch = -1 // eof
- }
-}
-
-// A mode value is a set of flags (or 0).
-// They control scanner behavior.
-//
-type Mode uint
-
-const (
- ScanComments Mode = 1 << iota // return comments as COMMENT tokens
-)
-
-// Init prepares the scanner s to tokenize the text src by setting the
-// scanner at the beginning of src. The scanner uses the file set file
-// for position information and it adds line information for each line.
-// It is ok to re-use the same file when re-scanning the same file as
-// line information which is already present is ignored. Init causes a
-// panic if the file size does not match the src size.
-//
-// Calls to Scan will invoke the error handler err if they encounter a
-// syntax error and err is not nil. Also, for each error encountered,
-// the Scanner field ErrorCount is incremented by one. The mode parameter
-// determines how comments are handled.
-//
-// Note that Init may call err if there is an error in the first character
-// of the file.
-//
-func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
- // Explicitly initialize all fields since a scanner may be reused.
- if file.Size() != len(src) {
- panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src)))
- }
- s.file = file
- s.dir, _ = filepath.Split(file.Name())
- s.src = src
- s.err = err
- s.mode = mode
-
- s.ch = ' '
- s.offset = 0
- s.rdOffset = 0
- s.lineOffset = 0
- s.ErrorCount = 0
- s.nextVal = false
-
- s.next()
-}
-
-func (s *Scanner) error(offs int, msg string) {
- if s.err != nil {
- s.err(s.file.Position(s.file.Pos(offs)), msg)
- }
- s.ErrorCount++
-}
-
-func (s *Scanner) scanComment() string {
- // initial [;#] already consumed
- offs := s.offset - 1 // position of initial [;#]
-
- for s.ch != '\n' && s.ch >= 0 {
- s.next()
- }
- return string(s.src[offs:s.offset])
-}
-
-func isLetter(ch rune) bool {
- return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch)
-}
-
-func isDigit(ch rune) bool {
- return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
-}
-
-func (s *Scanner) scanIdentifier() string {
- offs := s.offset
- for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' {
- s.next()
- }
- return string(s.src[offs:s.offset])
-}
-
-func (s *Scanner) scanEscape(val bool) {
- offs := s.offset
- ch := s.ch
- s.next() // always make progress
- switch ch {
- case '\\', '"':
- // ok
- case 'n', 't':
- if val {
- break // ok
- }
- fallthrough
- default:
- s.error(offs, "unknown escape sequence")
- }
-}
-
-func (s *Scanner) scanString() string {
- // '"' opening already consumed
- offs := s.offset - 1
-
- for s.ch != '"' {
- ch := s.ch
- s.next()
- if ch == '\n' || ch < 0 {
- s.error(offs, "string not terminated")
- break
- }
- if ch == '\\' {
- s.scanEscape(false)
- }
- }
-
- s.next()
-
- return string(s.src[offs:s.offset])
-}
-
-func stripCR(b []byte) []byte {
- c := make([]byte, len(b))
- i := 0
- for _, ch := range b {
- if ch != '\r' {
- c[i] = ch
- i++
- }
- }
- return c[:i]
-}
-
-func (s *Scanner) scanValString() string {
- offs := s.offset
-
- hasCR := false
- end := offs
- inQuote := false
-loop:
- for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' {
- ch := s.ch
- s.next()
- switch {
- case inQuote && ch == '\\':
- s.scanEscape(true)
- case !inQuote && ch == '\\':
- if s.ch == '\r' {
- hasCR = true
- s.next()
- }
- if s.ch != '\n' {
- s.error(offs, "unquoted '\\' must be followed by new line")
- break loop
- }
- s.next()
- case ch == '"':
- inQuote = !inQuote
- case ch == '\r':
- hasCR = true
- case ch < 0 || inQuote && ch == '\n':
- s.error(offs, "string not terminated")
- break loop
- }
- if inQuote || !isWhiteSpace(ch) {
- end = s.offset
- }
- }
-
- lit := s.src[offs:end]
- if hasCR {
- lit = stripCR(lit)
- }
-
- return string(lit)
-}
-
-func isWhiteSpace(ch rune) bool {
- return ch == ' ' || ch == '\t' || ch == '\r'
-}
-
-func (s *Scanner) skipWhitespace() {
- for isWhiteSpace(s.ch) {
- s.next()
- }
-}
-
-// Scan scans the next token and returns the token position, the token,
-// and its literal string if applicable. The source end is indicated by
-// token.EOF.
-//
-// If the returned token is a literal (token.IDENT, token.STRING) or
-// token.COMMENT, the literal string has the corresponding value.
-//
-// If the returned token is token.ILLEGAL, the literal string is the
-// offending character.
-//
-// In all other cases, Scan returns an empty literal string.
-//
-// For more tolerant parsing, Scan will return a valid token if
-// possible even if a syntax error was encountered. Thus, even
-// if the resulting token sequence contains no illegal tokens,
-// a client may not assume that no error occurred. Instead it
-// must check the scanner's ErrorCount or the number of calls
-// of the error handler, if there was one installed.
-//
-// Scan adds line information to the file added to the file
-// set with Init. Token positions are relative to that file
-// and thus relative to the file set.
-//
-func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
-scanAgain:
- s.skipWhitespace()
-
- // current token start
- pos = s.file.Pos(s.offset)
-
- // determine token value
- switch ch := s.ch; {
- case s.nextVal:
- lit = s.scanValString()
- tok = token.STRING
- s.nextVal = false
- case isLetter(ch):
- lit = s.scanIdentifier()
- tok = token.IDENT
- default:
- s.next() // always make progress
- switch ch {
- case -1:
- tok = token.EOF
- case '\n':
- tok = token.EOL
- case '"':
- tok = token.STRING
- lit = s.scanString()
- case '[':
- tok = token.LBRACK
- case ']':
- tok = token.RBRACK
- case ';', '#':
- // comment
- lit = s.scanComment()
- if s.mode&ScanComments == 0 {
- // skip comment
- goto scanAgain
- }
- tok = token.COMMENT
- case '=':
- tok = token.ASSIGN
- s.nextVal = true
- default:
- s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch))
- tok = token.ILLEGAL
- lit = string(ch)
- }
- }
-
- return
-}
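
For reference, a minimal sketch of how the removed scanner is driven, assuming the vendored import paths and that ErrorHandler is the func(token.Position, string) type implied by Init's use of s.err:

    package main

    import (
        "fmt"

        "gopkg.in/gcfg.v1/scanner"
        "gopkg.in/gcfg.v1/token"
    )

    func main() {
        src := []byte("[section]\nname = value ; trailing comment\n")

        // Init panics unless the registered file size matches len(src).
        fset := token.NewFileSet()
        file := fset.AddFile("example.gcfg", fset.Base(), len(src))

        var s scanner.Scanner
        report := func(pos token.Position, msg string) { fmt.Println(pos, msg) }
        s.Init(file, src, report, scanner.ScanComments)

        // Scan until token.EOF; ErrorCount is checked separately because
        // Scan keeps returning valid tokens after a syntax error.
        for {
            pos, tok, lit := s.Scan()
            if tok == token.EOF {
                break
            }
            fmt.Printf("%s\t%s\t%q\n", fset.Position(pos), tok, lit)
        }
        if s.ErrorCount > 0 {
            fmt.Println("errors:", s.ErrorCount)
        }
    }
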
diff --git a/vendor/gopkg.in/gcfg.v1/set.go b/vendor/gopkg.in/gcfg.v1/set.go
deleted file mode 100644
index 7252b689..00000000
--- a/vendor/gopkg.in/gcfg.v1/set.go
+++ /dev/null
@@ -1,293 +0,0 @@
-package gcfg
-
-import (
- "fmt"
- "math/big"
- "reflect"
- "strings"
- "unicode"
- "unicode/utf8"
-
- "gopkg.in/gcfg.v1/types"
-)
-
-type tag struct {
- ident string
- intMode string
-}
-
-func newTag(ts string) tag {
- t := tag{}
- s := strings.Split(ts, ",")
- t.ident = s[0]
- for _, tse := range s[1:] {
- if strings.HasPrefix(tse, "int=") {
- t.intMode = tse[len("int="):]
- }
- }
- return t
-}
-
-func fieldFold(v reflect.Value, name string) (reflect.Value, tag) {
- var n string
- r0, _ := utf8.DecodeRuneInString(name)
- if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) {
- n = "X"
- }
- n += strings.Replace(name, "-", "_", -1)
- f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool {
- if !v.FieldByName(fieldName).CanSet() {
- return false
- }
- f, _ := v.Type().FieldByName(fieldName)
- t := newTag(f.Tag.Get("gcfg"))
- if t.ident != "" {
- return strings.EqualFold(t.ident, name)
- }
- return strings.EqualFold(n, fieldName)
- })
- if !ok {
- return reflect.Value{}, tag{}
- }
- return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg"))
-}
-
-type setter func(destp interface{}, blank bool, val string, t tag) error
-
-var errUnsupportedType = fmt.Errorf("unsupported type")
-var errBlankUnsupported = fmt.Errorf("blank value not supported for type")
-
-var setters = []setter{
- typeSetter, textUnmarshalerSetter, kindSetter, scanSetter,
-}
-
-func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error {
- dtu, ok := d.(textUnmarshaler)
- if !ok {
- return errUnsupportedType
- }
- if blank {
- return errBlankUnsupported
- }
- return dtu.UnmarshalText([]byte(val))
-}
-
-func boolSetter(d interface{}, blank bool, val string, t tag) error {
- if blank {
- reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true))
- return nil
- }
- b, err := types.ParseBool(val)
- if err == nil {
- reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b))
- }
- return err
-}
-
-func intMode(mode string) types.IntMode {
- var m types.IntMode
- if strings.ContainsAny(mode, "dD") {
- m |= types.Dec
- }
- if strings.ContainsAny(mode, "hH") {
- m |= types.Hex
- }
- if strings.ContainsAny(mode, "oO") {
- m |= types.Oct
- }
- return m
-}
-
-var typeModes = map[reflect.Type]types.IntMode{
- reflect.TypeOf(int(0)): types.Dec | types.Hex,
- reflect.TypeOf(int8(0)): types.Dec | types.Hex,
- reflect.TypeOf(int16(0)): types.Dec | types.Hex,
- reflect.TypeOf(int32(0)): types.Dec | types.Hex,
- reflect.TypeOf(int64(0)): types.Dec | types.Hex,
- reflect.TypeOf(uint(0)): types.Dec | types.Hex,
- reflect.TypeOf(uint8(0)): types.Dec | types.Hex,
- reflect.TypeOf(uint16(0)): types.Dec | types.Hex,
- reflect.TypeOf(uint32(0)): types.Dec | types.Hex,
- reflect.TypeOf(uint64(0)): types.Dec | types.Hex,
- // use default mode (allow dec/hex/oct) for uintptr type
- reflect.TypeOf(big.Int{}): types.Dec | types.Hex,
-}
-
-func intModeDefault(t reflect.Type) types.IntMode {
- m, ok := typeModes[t]
- if !ok {
- m = types.Dec | types.Hex | types.Oct
- }
- return m
-}
-
-func intSetter(d interface{}, blank bool, val string, t tag) error {
- if blank {
- return errBlankUnsupported
- }
- mode := intMode(t.intMode)
- if mode == 0 {
- mode = intModeDefault(reflect.TypeOf(d).Elem())
- }
- return types.ParseInt(d, val, mode)
-}
-
-func stringSetter(d interface{}, blank bool, val string, t tag) error {
- if blank {
- return errBlankUnsupported
- }
- dsp, ok := d.(*string)
- if !ok {
- return errUnsupportedType
- }
- *dsp = val
- return nil
-}
-
-var kindSetters = map[reflect.Kind]setter{
- reflect.String: stringSetter,
- reflect.Bool: boolSetter,
- reflect.Int: intSetter,
- reflect.Int8: intSetter,
- reflect.Int16: intSetter,
- reflect.Int32: intSetter,
- reflect.Int64: intSetter,
- reflect.Uint: intSetter,
- reflect.Uint8: intSetter,
- reflect.Uint16: intSetter,
- reflect.Uint32: intSetter,
- reflect.Uint64: intSetter,
- reflect.Uintptr: intSetter,
-}
-
-var typeSetters = map[reflect.Type]setter{
- reflect.TypeOf(big.Int{}): intSetter,
-}
-
-func typeSetter(d interface{}, blank bool, val string, tt tag) error {
- t := reflect.ValueOf(d).Type().Elem()
- setter, ok := typeSetters[t]
- if !ok {
- return errUnsupportedType
- }
- return setter(d, blank, val, tt)
-}
-
-func kindSetter(d interface{}, blank bool, val string, tt tag) error {
- k := reflect.ValueOf(d).Type().Elem().Kind()
- setter, ok := kindSetters[k]
- if !ok {
- return errUnsupportedType
- }
- return setter(d, blank, val, tt)
-}
-
-func scanSetter(d interface{}, blank bool, val string, tt tag) error {
- if blank {
- return errBlankUnsupported
- }
- return types.ScanFully(d, val, 'v')
-}
-
-func set(cfg interface{}, sect, sub, name string, blank bool, value string) error {
- vPCfg := reflect.ValueOf(cfg)
- if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct {
- panic(fmt.Errorf("config must be a pointer to a struct"))
- }
- vCfg := vPCfg.Elem()
- vSect, _ := fieldFold(vCfg, sect)
- if !vSect.IsValid() {
- return fmt.Errorf("invalid section: section %q", sect)
- }
- if vSect.Kind() == reflect.Map {
- vst := vSect.Type()
- if vst.Key().Kind() != reflect.String ||
- vst.Elem().Kind() != reflect.Ptr ||
- vst.Elem().Elem().Kind() != reflect.Struct {
- panic(fmt.Errorf("map field for section must have string keys and "+
- " pointer-to-struct values: section %q", sect))
- }
- if vSect.IsNil() {
- vSect.Set(reflect.MakeMap(vst))
- }
- k := reflect.ValueOf(sub)
- pv := vSect.MapIndex(k)
- if !pv.IsValid() {
- vType := vSect.Type().Elem().Elem()
- pv = reflect.New(vType)
- vSect.SetMapIndex(k, pv)
- }
- vSect = pv.Elem()
- } else if vSect.Kind() != reflect.Struct {
- panic(fmt.Errorf("field for section must be a map or a struct: "+
- "section %q", sect))
- } else if sub != "" {
- return fmt.Errorf("invalid subsection: "+
- "section %q subsection %q", sect, sub)
- }
- // Empty name is a special value, meaning that only the
- // section/subsection object is to be created, with no values set.
- if name == "" {
- return nil
- }
- vVar, t := fieldFold(vSect, name)
- if !vVar.IsValid() {
- return fmt.Errorf("invalid variable: "+
- "section %q subsection %q variable %q", sect, sub, name)
- }
- // vVal is either the single-valued var, or a newly allocated value within the multi-valued var
- var vVal reflect.Value
- // multi-value if unnamed slice type
- isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice ||
- vVar.Type().Name() == "" && vVar.Kind() == reflect.Ptr && vVar.Type().Elem().Name() == "" && vVar.Type().Elem().Kind() == reflect.Slice
- if isMulti && vVar.Kind() == reflect.Ptr {
- if vVar.IsNil() {
- vVar.Set(reflect.New(vVar.Type().Elem()))
- }
- vVar = vVar.Elem()
- }
- if isMulti && blank {
- vVar.Set(reflect.Zero(vVar.Type()))
- return nil
- }
- if isMulti {
- vVal = reflect.New(vVar.Type().Elem()).Elem()
- } else {
- vVal = vVar
- }
- isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr
- isNew := isDeref && vVal.IsNil()
- // vAddr is address of value to set (dereferenced & allocated as needed)
- var vAddr reflect.Value
- switch {
- case isNew:
- vAddr = reflect.New(vVal.Type().Elem())
- case isDeref && !isNew:
- vAddr = vVal
- default:
- vAddr = vVal.Addr()
- }
- vAddrI := vAddr.Interface()
- err, ok := error(nil), false
- for _, s := range setters {
- err = s(vAddrI, blank, value, t)
- if err == nil {
- ok = true
- break
- }
- if err != errUnsupportedType {
- return err
- }
- }
- if !ok {
- // in case all setters returned errUnsupportedType
- return err
- }
- if isNew { // set reference if it was dereferenced and newly allocated
- vVal.Set(vAddr)
- }
- if isMulti { // append if multi-valued
- vVar.Set(reflect.Append(vVar, vVal))
- }
- return nil
-}
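
The tag parsing and fieldFold machinery above is what backs gcfg's struct-tag field matching. A minimal sketch of the user-facing side, assuming the package's ReadStringInto entry point and a hypothetical Config type:

    package main

    import (
        "fmt"

        gcfg "gopkg.in/gcfg.v1"
    )

    type Config struct {
        Section struct {
            Name    string
            MaxSize int `gcfg:"max-size,int=h"` // tag ident "max-size", hex int mode, as parsed by newTag
        }
    }

    func main() {
        src := "[section]\nname = example\nmax-size = 0xff\n"

        var cfg Config
        if err := gcfg.ReadStringInto(&cfg, src); err != nil {
            fmt.Println("read error:", err)
            return
        }
        fmt.Println(cfg.Section.Name, cfg.Section.MaxSize) // example 255
    }
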
diff --git a/vendor/gopkg.in/gcfg.v1/token/position.go b/vendor/gopkg.in/gcfg.v1/token/position.go
deleted file mode 100644
index fc45c1e7..00000000
--- a/vendor/gopkg.in/gcfg.v1/token/position.go
+++ /dev/null
@@ -1,435 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// TODO(gri) consider making this a separate package outside the go directory.
-
-package token
-
-import (
- "fmt"
- "sort"
- "sync"
-)
-
-// -----------------------------------------------------------------------------
-// Positions
-
-// Position describes an arbitrary source position
-// including the file, line, and column location.
-// A Position is valid if the line number is > 0.
-//
-type Position struct {
- Filename string // filename, if any
- Offset int // offset, starting at 0
- Line int // line number, starting at 1
- Column int // column number, starting at 1 (character count)
-}
-
-// IsValid returns true if the position is valid.
-func (pos *Position) IsValid() bool { return pos.Line > 0 }
-
-// String returns a string in one of several forms:
-//
-// file:line:column valid position with file name
-// line:column valid position without file name
-// file invalid position with file name
-// - invalid position without file name
-//
-func (pos Position) String() string {
- s := pos.Filename
- if pos.IsValid() {
- if s != "" {
- s += ":"
- }
- s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
- }
- if s == "" {
- s = "-"
- }
- return s
-}
-
-// Pos is a compact encoding of a source position within a file set.
-// It can be converted into a Position for a more convenient, but much
-// larger, representation.
-//
-// The Pos value for a given file is a number in the range [base, base+size],
-// where base and size are specified when adding the file to the file set via
-// AddFile.
-//
-// To create the Pos value for a specific source offset, first add
-// the respective file to the current file set (via FileSet.AddFile)
-// and then call File.Pos(offset) for that file. Given a Pos value p
-// for a specific file set fset, the corresponding Position value is
-// obtained by calling fset.Position(p).
-//
-// Pos values can be compared directly with the usual comparison operators:
-// If two Pos values p and q are in the same file, comparing p and q is
-// equivalent to comparing the respective source file offsets. If p and q
-// are in different files, p < q is true if the file implied by p was added
-// to the respective file set before the file implied by q.
-//
-type Pos int
-
-// The zero value for Pos is NoPos; there is no file and line information
-// associated with it, and NoPos.IsValid() is false. NoPos is always
-// smaller than any other Pos value. The corresponding Position value
-// for NoPos is the zero value for Position.
-//
-const NoPos Pos = 0
-
-// IsValid returns true if the position is valid.
-func (p Pos) IsValid() bool {
- return p != NoPos
-}
-
-// -----------------------------------------------------------------------------
-// File
-
-// A File is a handle for a file belonging to a FileSet.
-// A File has a name, size, and line offset table.
-//
-type File struct {
- set *FileSet
- name string // file name as provided to AddFile
- base int // Pos value range for this file is [base...base+size]
- size int // file size as provided to AddFile
-
- // lines and infos are protected by set.mutex
- lines []int
- infos []lineInfo
-}
-
-// Name returns the file name of file f as registered with AddFile.
-func (f *File) Name() string {
- return f.name
-}
-
-// Base returns the base offset of file f as registered with AddFile.
-func (f *File) Base() int {
- return f.base
-}
-
-// Size returns the size of file f as registered with AddFile.
-func (f *File) Size() int {
- return f.size
-}
-
-// LineCount returns the number of lines in file f.
-func (f *File) LineCount() int {
- f.set.mutex.RLock()
- n := len(f.lines)
- f.set.mutex.RUnlock()
- return n
-}
-
-// AddLine adds the line offset for a new line.
-// The line offset must be larger than the offset for the previous line
-// and smaller than the file size; otherwise the line offset is ignored.
-//
-func (f *File) AddLine(offset int) {
- f.set.mutex.Lock()
- if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size {
- f.lines = append(f.lines, offset)
- }
- f.set.mutex.Unlock()
-}
-
-// SetLines sets the line offsets for a file and returns true if successful.
-// The line offsets are the offsets of the first character of each line;
-// for instance for the content "ab\nc\n" the line offsets are {0, 3}.
-// An empty file has an empty line offset table.
-// Each line offset must be larger than the offset for the previous line
-// and smaller than the file size; otherwise SetLines fails and returns
-// false.
-//
-func (f *File) SetLines(lines []int) bool {
- // verify validity of lines table
- size := f.size
- for i, offset := range lines {
- if i > 0 && offset <= lines[i-1] || size <= offset {
- return false
- }
- }
-
- // set lines table
- f.set.mutex.Lock()
- f.lines = lines
- f.set.mutex.Unlock()
- return true
-}
-
-// SetLinesForContent sets the line offsets for the given file content.
-func (f *File) SetLinesForContent(content []byte) {
- var lines []int
- line := 0
- for offset, b := range content {
- if line >= 0 {
- lines = append(lines, line)
- }
- line = -1
- if b == '\n' {
- line = offset + 1
- }
- }
-
- // set lines table
- f.set.mutex.Lock()
- f.lines = lines
- f.set.mutex.Unlock()
-}
-
-// A lineInfo object describes alternative file and line number
-// information (such as provided via a //line comment in a .go
-// file) for a given file offset.
-type lineInfo struct {
- // fields are exported to make them accessible to gob
- Offset int
- Filename string
- Line int
-}
-
-// AddLineInfo adds alternative file and line number information for
-// a given file offset. The offset must be larger than the offset for
-// the previously added alternative line info and smaller than the
-// file size; otherwise the information is ignored.
-//
-// AddLineInfo is typically used to register alternative position
-// information for //line filename:line comments in source files.
-//
-func (f *File) AddLineInfo(offset int, filename string, line int) {
- f.set.mutex.Lock()
- if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
- f.infos = append(f.infos, lineInfo{offset, filename, line})
- }
- f.set.mutex.Unlock()
-}
-
-// Pos returns the Pos value for the given file offset;
-// the offset must be <= f.Size().
-// f.Pos(f.Offset(p)) == p.
-//
-func (f *File) Pos(offset int) Pos {
- if offset > f.size {
- panic("illegal file offset")
- }
- return Pos(f.base + offset)
-}
-
-// Offset returns the offset for the given file position p;
-// p must be a valid Pos value in that file.
-// f.Offset(f.Pos(offset)) == offset.
-//
-func (f *File) Offset(p Pos) int {
- if int(p) < f.base || int(p) > f.base+f.size {
- panic("illegal Pos value")
- }
- return int(p) - f.base
-}
-
-// Line returns the line number for the given file position p;
-// p must be a Pos value in that file or NoPos.
-//
-func (f *File) Line(p Pos) int {
- // TODO(gri) this can be implemented much more efficiently
- return f.Position(p).Line
-}
-
-func searchLineInfos(a []lineInfo, x int) int {
- return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1
-}
-
-// info returns the file name, line, and column number for a file offset.
-func (f *File) info(offset int) (filename string, line, column int) {
- filename = f.name
- if i := searchInts(f.lines, offset); i >= 0 {
- line, column = i+1, offset-f.lines[i]+1
- }
- if len(f.infos) > 0 {
- // almost no files have extra line infos
- if i := searchLineInfos(f.infos, offset); i >= 0 {
- alt := &f.infos[i]
- filename = alt.Filename
- if i := searchInts(f.lines, alt.Offset); i >= 0 {
- line += alt.Line - i - 1
- }
- }
- }
- return
-}
-
-func (f *File) position(p Pos) (pos Position) {
- offset := int(p) - f.base
- pos.Offset = offset
- pos.Filename, pos.Line, pos.Column = f.info(offset)
- return
-}
-
-// Position returns the Position value for the given file position p;
-// p must be a Pos value in that file or NoPos.
-//
-func (f *File) Position(p Pos) (pos Position) {
- if p != NoPos {
- if int(p) < f.base || int(p) > f.base+f.size {
- panic("illegal Pos value")
- }
- pos = f.position(p)
- }
- return
-}
-
-// -----------------------------------------------------------------------------
-// FileSet
-
-// A FileSet represents a set of source files.
-// Methods of file sets are synchronized; multiple goroutines
-// may invoke them concurrently.
-//
-type FileSet struct {
- mutex sync.RWMutex // protects the file set
- base int // base offset for the next file
- files []*File // list of files in the order added to the set
- last *File // cache of last file looked up
-}
-
-// NewFileSet creates a new file set.
-func NewFileSet() *FileSet {
- s := new(FileSet)
- s.base = 1 // 0 == NoPos
- return s
-}
-
-// Base returns the minimum base offset that must be provided to
-// AddFile when adding the next file.
-//
-func (s *FileSet) Base() int {
- s.mutex.RLock()
- b := s.base
- s.mutex.RUnlock()
- return b
-}
-
-// AddFile adds a new file with a given filename, base offset, and file size
-// to the file set s and returns the file. Multiple files may have the same
-// name. The base offset must not be smaller than the FileSet's Base(), and
-// size must not be negative.
-//
-// Adding the file will set the file set's Base() value to base + size + 1
-// as the minimum base value for the next file. The following relationship
-// exists between a Pos value p for a given file offset offs:
-//
-// int(p) = base + offs
-//
-// with offs in the range [0, size] and thus p in the range [base, base+size].
-// For convenience, File.Pos may be used to create file-specific position
-// values from a file offset.
-//
-func (s *FileSet) AddFile(filename string, base, size int) *File {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- if base < s.base || size < 0 {
- panic("illegal base or size")
- }
- // base >= s.base && size >= 0
- f := &File{s, filename, base, size, []int{0}, nil}
- base += size + 1 // +1 because EOF also has a position
- if base < 0 {
- panic("token.Pos offset overflow (> 2G of source code in file set)")
- }
- // add the file to the file set
- s.base = base
- s.files = append(s.files, f)
- s.last = f
- return f
-}
-
-// Iterate calls f for the files in the file set in the order they were added
-// until f returns false.
-//
-func (s *FileSet) Iterate(f func(*File) bool) {
- for i := 0; ; i++ {
- var file *File
- s.mutex.RLock()
- if i < len(s.files) {
- file = s.files[i]
- }
- s.mutex.RUnlock()
- if file == nil || !f(file) {
- break
- }
- }
-}
-
-func searchFiles(a []*File, x int) int {
- return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
-}
-
-func (s *FileSet) file(p Pos) *File {
- // common case: p is in last file
- if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
- return f
- }
- // p is not in last file - search all files
- if i := searchFiles(s.files, int(p)); i >= 0 {
- f := s.files[i]
- // f.base <= int(p) by definition of searchFiles
- if int(p) <= f.base+f.size {
- s.last = f
- return f
- }
- }
- return nil
-}
-
-// File returns the file that contains the position p.
-// If no such file is found (for instance for p == NoPos),
-// the result is nil.
-//
-func (s *FileSet) File(p Pos) (f *File) {
- if p != NoPos {
- s.mutex.RLock()
- f = s.file(p)
- s.mutex.RUnlock()
- }
- return
-}
-
-// Position converts a Pos in the fileset into a general Position.
-func (s *FileSet) Position(p Pos) (pos Position) {
- if p != NoPos {
- s.mutex.RLock()
- if f := s.file(p); f != nil {
- pos = f.position(p)
- }
- s.mutex.RUnlock()
- }
- return
-}
-
-// -----------------------------------------------------------------------------
-// Helper functions
-
-func searchInts(a []int, x int) int {
- // This function body is a manually inlined version of:
- //
- // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
- //
- // With better compiler optimizations, this may not be needed in the
- // future, but at the moment this change improves the go/printer
- // benchmark performance by ~30%. This has a direct impact on the
- // speed of gofmt and thus seems worthwhile (2011-04-29).
- // TODO(gri): Remove this when compilers have caught up.
- i, j := 0, len(a)
- for i < j {
- h := i + (j-i)/2 // avoid overflow when computing h
- // i ≤ h < j
- if a[h] <= x {
- i = h + 1
- } else {
- j = h
- }
- }
- return i - 1
-}
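
A minimal sketch of the Pos/Position round trip described in the comments above (vendored import path assumed):

    package main

    import (
        "fmt"

        "gopkg.in/gcfg.v1/token"
    )

    func main() {
        src := []byte("a = 1\nb = 2\n")

        fset := token.NewFileSet()
        file := fset.AddFile("demo.gcfg", fset.Base(), len(src))
        file.SetLinesForContent(src) // line offsets {0, 6}

        off := 8           // the '=' on line 2
        p := file.Pos(off) // int(p) == file.Base() + off

        fmt.Println(int(p) == file.Base()+off) // true
        fmt.Println(fset.Position(p))          // demo.gcfg:2:3
        fmt.Println(file.Offset(p) == off)     // true
    }
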
diff --git a/vendor/gopkg.in/gcfg.v1/token/serialize.go b/vendor/gopkg.in/gcfg.v1/token/serialize.go
deleted file mode 100644
index 4adc8f9e..00000000
--- a/vendor/gopkg.in/gcfg.v1/token/serialize.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package token
-
-type serializedFile struct {
- // fields correspond 1:1 to fields with same (lower-case) name in File
- Name string
- Base int
- Size int
- Lines []int
- Infos []lineInfo
-}
-
-type serializedFileSet struct {
- Base int
- Files []serializedFile
-}
-
-// Read calls decode to deserialize a file set into s; s must not be nil.
-func (s *FileSet) Read(decode func(interface{}) error) error {
- var ss serializedFileSet
- if err := decode(&ss); err != nil {
- return err
- }
-
- s.mutex.Lock()
- s.base = ss.Base
- files := make([]*File, len(ss.Files))
- for i := 0; i < len(ss.Files); i++ {
- f := &ss.Files[i]
- files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos}
- }
- s.files = files
- s.last = nil
- s.mutex.Unlock()
-
- return nil
-}
-
-// Write calls encode to serialize the file set s.
-func (s *FileSet) Write(encode func(interface{}) error) error {
- var ss serializedFileSet
-
- s.mutex.Lock()
- ss.Base = s.base
- files := make([]serializedFile, len(s.files))
- for i, f := range s.files {
- files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos}
- }
- ss.Files = files
- s.mutex.Unlock()
-
- return encode(ss)
-}
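
Read and Write take encode/decode callbacks rather than an io.Reader/io.Writer, and encoding/gob's Encode and Decode methods match that shape directly. A minimal round-trip sketch (vendored import path assumed):

    package main

    import (
        "bytes"
        "encoding/gob"
        "fmt"

        "gopkg.in/gcfg.v1/token"
    )

    func main() {
        fset := token.NewFileSet()
        fset.AddFile("a.gcfg", fset.Base(), 10)

        var buf bytes.Buffer
        if err := fset.Write(gob.NewEncoder(&buf).Encode); err != nil {
            fmt.Println("encode:", err)
            return
        }

        restored := token.NewFileSet()
        if err := restored.Read(gob.NewDecoder(&buf).Decode); err != nil {
            fmt.Println("decode:", err)
            return
        }
        fmt.Println(restored.Base() == fset.Base()) // true
    }
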
diff --git a/vendor/gopkg.in/gcfg.v1/token/token.go b/vendor/gopkg.in/gcfg.v1/token/token.go
deleted file mode 100644
index b3c7c83f..00000000
--- a/vendor/gopkg.in/gcfg.v1/token/token.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package token defines constants representing the lexical tokens of the gcfg
-// configuration syntax and basic operations on tokens (printing, predicates).
-//
-// Note that the API for the token package may change to accommodate new
-// features or implementation changes in gcfg.
-//
-package token
-
-import "strconv"
-
-// Token is the set of lexical tokens of the gcfg configuration syntax.
-type Token int
-
-// The list of tokens.
-const (
- // Special tokens
- ILLEGAL Token = iota
- EOF
- COMMENT
-
- literal_beg
- // Identifiers and basic type literals
- // (these tokens stand for classes of literals)
- IDENT // section-name, variable-name
- STRING // "subsection-name", variable value
- literal_end
-
- operator_beg
- // Operators and delimiters
- ASSIGN // =
- LBRACK // [
- RBRACK // ]
- EOL // \n
- operator_end
-)
-
-var tokens = [...]string{
- ILLEGAL: "ILLEGAL",
-
- EOF: "EOF",
- COMMENT: "COMMENT",
-
- IDENT: "IDENT",
- STRING: "STRING",
-
- ASSIGN: "=",
- LBRACK: "[",
- RBRACK: "]",
- EOL: "\n",
-}
-
-// String returns the string corresponding to the token tok.
-// For operators and delimiters, the string is the actual token character
-// sequence (e.g., for the token ASSIGN, the string is "="). For all other
-// tokens the string corresponds to the token constant name (e.g. for the
-// token IDENT, the string is "IDENT").
-//
-func (tok Token) String() string {
- s := ""
- if 0 <= tok && tok < Token(len(tokens)) {
- s = tokens[tok]
- }
- if s == "" {
- s = "token(" + strconv.Itoa(int(tok)) + ")"
- }
- return s
-}
-
-// Predicates
-
-// IsLiteral returns true for tokens corresponding to identifiers
-// and basic type literals; it returns false otherwise.
-//
-func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end }
-
-// IsOperator returns true for tokens corresponding to operators and
-// delimiters; it returns false otherwise.
-//
-func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end }
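
A short sketch of the String and predicate behavior documented above (vendored import path assumed):

    package main

    import (
        "fmt"

        "gopkg.in/gcfg.v1/token"
    )

    func main() {
        fmt.Println(token.ASSIGN)              // "=" (operator: actual token character)
        fmt.Println(token.IDENT)               // "IDENT" (non-operator: constant name)
        fmt.Println(token.STRING.IsLiteral())  // true
        fmt.Println(token.LBRACK.IsOperator()) // true
        fmt.Println(token.COMMENT.IsLiteral()) // false: COMMENT sits outside literal_beg/literal_end
    }
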
diff --git a/vendor/gopkg.in/gcfg.v1/types/bool.go b/vendor/gopkg.in/gcfg.v1/types/bool.go
deleted file mode 100644
index 8dcae0d8..00000000
--- a/vendor/gopkg.in/gcfg.v1/types/bool.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package types
-
-// BoolValues defines the name and value mappings for ParseBool.
-var BoolValues = map[string]interface{}{
- "true": true, "yes": true, "on": true, "1": true,
- "false": false, "no": false, "off": false, "0": false,
-}
-
-var boolParser = func() *EnumParser {
- ep := &EnumParser{}
- ep.AddVals(BoolValues)
- return ep
-}()
-
-// ParseBool parses bool values according to the definitions in BoolValues.
-// Parsing is case-insensitive.
-func ParseBool(s string) (bool, error) {
- v, err := boolParser.Parse(s)
- if err != nil {
- return false, err
- }
- return v.(bool), nil
-}
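
A short sketch of ParseBool's case-insensitive matching against BoolValues (vendored import path assumed):

    package main

    import (
        "fmt"

        "gopkg.in/gcfg.v1/types"
    )

    func main() {
        for _, s := range []string{"YES", "off", "1", "maybe"} {
            v, err := types.ParseBool(s)
            fmt.Println(s, v, err) // YES true, off false, 1 true; "maybe" yields an error
        }
    }
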
diff --git a/vendor/gopkg.in/gcfg.v1/types/doc.go b/vendor/gopkg.in/gcfg.v1/types/doc.go
deleted file mode 100644
index 9f9c345f..00000000
--- a/vendor/gopkg.in/gcfg.v1/types/doc.go
+++ /dev/null
@@ -1,4 +0,0 @@
-// Package types defines helpers for type conversions.
-//
-// The API for this package is not finalized yet.
-package types
diff --git a/vendor/gopkg.in/gcfg.v1/types/enum.go b/vendor/gopkg.in/gcfg.v1/types/enum.go
deleted file mode 100644
index 1a0c7ef4..00000000
--- a/vendor/gopkg.in/gcfg.v1/types/enum.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package types
-
-import (
- "fmt"
- "reflect"
- "strings"
-)
-
-// EnumParser parses "enum" values; i.e. a predefined set of strings to
-// predefined values.
-type EnumParser struct {
- Type string // type name; if not set, use type of first value added
- CaseMatch bool // if true, matching of strings is case-sensitive
- // PrefixMatch bool
- vals map[string]interface{}
-}
-
-// AddVals adds strings and values to an EnumParser.
-func (ep *EnumParser) AddVals(vals map[string]interface{}) {
- if ep.vals == nil {
- ep.vals = make(map[string]interface{})
- }
- for k, v := range vals {
- if ep.Type == "" {
- ep.Type = reflect.TypeOf(v).Name()
- }
- if !ep.CaseMatch {
- k = strings.ToLower(k)
- }
- ep.vals[k] = v
- }
-}
-
-// Parse parses the string and returns the value or an error.
-func (ep EnumParser) Parse(s string) (interface{}, error) {
- if !ep.CaseMatch {
- s = strings.ToLower(s)
- }
- v, ok := ep.vals[s]
- if !ok {
- return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s)
- }
- return v, nil
-}
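
EnumParser is the general mechanism behind ParseBool in bool.go. A minimal sketch with a hypothetical color enum (vendored import path assumed):

    package main

    import (
        "fmt"

        "gopkg.in/gcfg.v1/types"
    )

    func main() {
        ep := &types.EnumParser{Type: "color"}
        ep.AddVals(map[string]interface{}{
            "red":   0xff0000,
            "green": 0x00ff00,
        })

        v, err := ep.Parse("Red") // CaseMatch is false, so matching folds case
        fmt.Println(v, err)       // 16711680 <nil>

        _, err = ep.Parse("blue")
        fmt.Println(err) // failed to parse color `blue`
    }
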
diff --git a/vendor/gopkg.in/gcfg.v1/types/int.go b/vendor/gopkg.in/gcfg.v1/types/int.go
deleted file mode 100644
index af7e75c1..00000000
--- a/vendor/gopkg.in/gcfg.v1/types/int.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package types
-
-import (
- "fmt"
- "strings"
-)
-
-// An IntMode is a mode for parsing integer values, representing a set of
-// accepted bases.
-type IntMode uint8
-
-// IntMode values for ParseInt; can be combined using binary or.
-const (
- Dec IntMode = 1 << iota
- Hex
- Oct
-)
-
-// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`.
-func (m IntMode) String() string {
- var modes []string
- if m&Dec != 0 {
- modes = append(modes, "Dec")
- }
- if m&Hex != 0 {
- modes = append(modes, "Hex")
- }
- if m&Oct != 0 {
- modes = append(modes, "Oct")
- }
- return "IntMode(" + strings.Join(modes, "|") + ")"
-}
-
-var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix")
-
-func prefix0(val string) bool {
- return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0")
-}
-
-func prefix0x(val string) bool {
- return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x")
-}
-
-// ParseInt parses val using mode into intptr, which must be a pointer to an
-// integer kind type. Non-decimal values require a `0` or `0x` prefix in cases
-// where mode permits ambiguity of base; otherwise the prefix can be omitted.
-func ParseInt(intptr interface{}, val string, mode IntMode) error {
- val = strings.TrimSpace(val)
- verb := byte(0)
- switch mode {
- case Dec:
- verb = 'd'
- case Dec + Hex:
- if prefix0x(val) {
- verb = 'v'
- } else {
- verb = 'd'
- }
- case Dec + Oct:
- if prefix0(val) && !prefix0x(val) {
- verb = 'v'
- } else {
- verb = 'd'
- }
- case Dec + Hex + Oct:
- verb = 'v'
- case Hex:
- if prefix0x(val) {
- verb = 'v'
- } else {
- verb = 'x'
- }
- case Oct:
- verb = 'o'
- case Hex + Oct:
- if prefix0(val) {
- verb = 'v'
- } else {
- return errIntAmbig
- }
- }
- if verb == 0 {
- panic("unsupported mode")
- }
- return ScanFully(intptr, val, verb)
-}
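
A short sketch of how the mode and prefix rules above interact (vendored import path assumed):

    package main

    import (
        "fmt"

        "gopkg.in/gcfg.v1/types"
    )

    func main() {
        var n int

        // Dec|Hex: a 0x prefix disambiguates, so hex is accepted.
        fmt.Println(types.ParseInt(&n, "0x1f", types.Dec|types.Hex), n) // <nil> 31

        // Dec|Hex without a prefix falls back to decimal.
        fmt.Println(types.ParseInt(&n, "17", types.Dec|types.Hex), n) // <nil> 17

        // Hex|Oct with no '0' prefix is ambiguous and rejected.
        fmt.Println(types.ParseInt(&n, "17", types.Hex|types.Oct)) // ambiguous integer value error
    }
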
diff --git a/vendor/gopkg.in/gcfg.v1/types/scan.go b/vendor/gopkg.in/gcfg.v1/types/scan.go
deleted file mode 100644
index db2f6ed3..00000000
--- a/vendor/gopkg.in/gcfg.v1/types/scan.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package types
-
-import (
- "fmt"
- "io"
- "reflect"
-)
-
-// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr.
-func ScanFully(ptr interface{}, val string, verb byte) error {
- t := reflect.ValueOf(ptr).Elem().Type()
- // attempt to read extra bytes to make sure the value is consumed
- var b []byte
- n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b)
- switch {
- case n < 1 || n == 1 && err != io.EOF:
- return fmt.Errorf("failed to parse %q as %v: %v", val, t, err)
- case n > 1:
- return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b))
- }
- // n == 1 && err == io.EOF
- return nil
-}
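
A short sketch of ScanFully's trailing-garbage detection (vendored import path assumed):

    package main

    import (
        "fmt"

        "gopkg.in/gcfg.v1/types"
    )

    func main() {
        var f float64
        fmt.Println(types.ScanFully(&f, "3.25", 'v'), f) // <nil> 3.25

        // The extra %s probe catches unconsumed input.
        fmt.Println(types.ScanFully(&f, "3.25x", 'v')) // failed to parse "3.25x" as float64: extra characters "x"
    }
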
diff --git a/vendor/manifest b/vendor/manifest
index f3f8b159..ef9cc7d9 100644
--- a/vendor/manifest
+++ b/vendor/manifest
@@ -20,6 +20,14 @@
"notests": true
},
{
+ "importpath": "github.com/BurntSushi/toml",
+ "repository": "https://github.com/BurntSushi/toml",
+ "vcs": "git",
+ "revision": "99064174e013895bbd9b025c31100bd1d9b590ca",
+ "branch": "master",
+ "notests": true
+ },
+ {
"importpath": "github.com/Sirupsen/logrus",
"repository": "https://github.com/Sirupsen/logrus",
"vcs": "",
@@ -204,14 +212,6 @@
"notests": true
},
{
- "importpath": "gopkg.in/gcfg.v1",
- "repository": "https://gopkg.in/gcfg.v1",
- "vcs": "",
- "revision": "083575c3955c85df16fe9590cceab64d03f5eb6e",
- "branch": "master",
- "notests": true
- },
- {
"importpath": "gopkg.in/yaml.v2",
"repository": "https://gopkg.in/yaml.v2",
"vcs": "",