author    | Wim <wim@42.be>             | 2020-12-31 14:48:12 +0100
committer | GitHub <noreply@github.com> | 2020-12-31 14:48:12 +0100
commit    | 4f20ebead36876a88391bf033d1de3e4cf0228da (patch)
tree      | 70b6fd79c6a5e00c958c29a7bd3926f074e76ba6 /vendor/github.com/pelletier/go-toml/lexer.go
parent    | a9f89dbc645aafc68daa9fc8d589f55104b535c7 (diff)
Update vendor for next release (#1343)
Diffstat (limited to 'vendor/github.com/pelletier/go-toml/lexer.go')
-rw-r--r-- | vendor/github.com/pelletier/go-toml/lexer.go | 100
1 file changed, 53 insertions, 47 deletions
diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go
index 425e847a..b1886192 100644
--- a/vendor/github.com/pelletier/go-toml/lexer.go
+++ b/vendor/github.com/pelletier/go-toml/lexer.go
@@ -306,7 +306,7 @@ func (l *tomlLexer) lexComma() tomlLexStateFn {
 // Parse the key and emits its value without escape sequences.
 // bare keys, basic string keys and literal string keys are supported.
 func (l *tomlLexer) lexKey() tomlLexStateFn {
-    growingString := ""
+    var sb strings.Builder

     for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() {
         if r == '"' {
@@ -315,7 +315,9 @@
             if err != nil {
                 return l.errorf(err.Error())
             }
-            growingString += "\"" + str + "\""
+            sb.WriteString("\"")
+            sb.WriteString(str)
+            sb.WriteString("\"")
             l.next()
             continue
         } else if r == '\'' {
@@ -324,41 +326,45 @@
             if err != nil {
                 return l.errorf(err.Error())
             }
-            growingString += "'" + str + "'"
+            sb.WriteString("'")
+            sb.WriteString(str)
+            sb.WriteString("'")
             l.next()
             continue
         } else if r == '\n' {
             return l.errorf("keys cannot contain new lines")
         } else if isSpace(r) {
-            str := " "
+            var str strings.Builder
+            str.WriteString(" ")
+
             // skip trailing whitespace
             l.next()
             for r = l.peek(); isSpace(r); r = l.peek() {
-                str += string(r)
+                str.WriteRune(r)
                 l.next()
             }
             // break loop if not a dot
             if r != '.' {
                 break
             }
-            str += "."
+            str.WriteString(".")
             // skip trailing whitespace after dot
             l.next()
             for r = l.peek(); isSpace(r); r = l.peek() {
-                str += string(r)
+                str.WriteRune(r)
                 l.next()
             }
-            growingString += str
+            sb.WriteString(str.String())
             continue
         } else if r == '.' {
             // skip
         } else if !isValidBareChar(r) {
             return l.errorf("keys cannot contain %c character", r)
         }
-        growingString += string(r)
+        sb.WriteRune(r)
         l.next()
     }
-    l.emitWithValue(tokenKey, growingString)
+    l.emitWithValue(tokenKey, sb.String())
     return l.lexVoid
 }
@@ -383,7 +389,7 @@ func (l *tomlLexer) lexLeftBracket() tomlLexStateFn {
 }

 func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) {
-    growingString := ""
+    var sb strings.Builder

     if discardLeadingNewLine {
         if l.follow("\r\n") {
@@ -397,14 +403,14 @@ func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNe
     // find end of string
     for {
         if l.follow(terminator) {
-            return growingString, nil
+            return sb.String(), nil
         }

         next := l.peek()
         if next == eof {
             break
         }
-        growingString += string(l.next())
+        sb.WriteRune(l.next())
     }

     return "", errors.New("unclosed string")
@@ -438,7 +444,7 @@ func (l *tomlLexer) lexLiteralString() tomlLexStateFn {
 // Terminator is the substring indicating the end of the token.
 // The resulting string does not include the terminator.
 func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) {
-    growingString := ""
+    var sb strings.Builder

     if discardLeadingNewLine {
         if l.follow("\r\n") {
@@ -451,7 +457,7 @@ func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine,

     for {
         if l.follow(terminator) {
-            return growingString, nil
+            return sb.String(), nil
         }

         if l.follow("\\") {
@@ -469,61 +475,61 @@ func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine,
                     l.next()
                 }
             case '"':
-                growingString += "\""
+                sb.WriteString("\"")
                 l.next()
             case 'n':
-                growingString += "\n"
+                sb.WriteString("\n")
                 l.next()
             case 'b':
-                growingString += "\b"
+                sb.WriteString("\b")
                 l.next()
             case 'f':
-                growingString += "\f"
+                sb.WriteString("\f")
                 l.next()
             case '/':
-                growingString += "/"
+                sb.WriteString("/")
                 l.next()
             case 't':
-                growingString += "\t"
+                sb.WriteString("\t")
                 l.next()
             case 'r':
-                growingString += "\r"
+                sb.WriteString("\r")
                 l.next()
             case '\\':
-                growingString += "\\"
+                sb.WriteString("\\")
                 l.next()
             case 'u':
                 l.next()
-                code := ""
+                var code strings.Builder
                 for i := 0; i < 4; i++ {
                     c := l.peek()
                     if !isHexDigit(c) {
                         return "", errors.New("unfinished unicode escape")
                     }
                     l.next()
-                    code = code + string(c)
+                    code.WriteRune(c)
                 }
-                intcode, err := strconv.ParseInt(code, 16, 32)
+                intcode, err := strconv.ParseInt(code.String(), 16, 32)
                 if err != nil {
-                    return "", errors.New("invalid unicode escape: \\u" + code)
+                    return "", errors.New("invalid unicode escape: \\u" + code.String())
                 }
-                growingString += string(rune(intcode))
+                sb.WriteRune(rune(intcode))
             case 'U':
                 l.next()
-                code := ""
+                var code strings.Builder
                 for i := 0; i < 8; i++ {
                     c := l.peek()
                     if !isHexDigit(c) {
                         return "", errors.New("unfinished unicode escape")
                     }
                     l.next()
-                    code = code + string(c)
+                    code.WriteRune(c)
                 }
-                intcode, err := strconv.ParseInt(code, 16, 64)
+                intcode, err := strconv.ParseInt(code.String(), 16, 64)
                 if err != nil {
-                    return "", errors.New("invalid unicode escape: \\U" + code)
+                    return "", errors.New("invalid unicode escape: \\U" + code.String())
                 }
-                growingString += string(rune(intcode))
+                sb.WriteRune(rune(intcode))
             default:
                 return "", errors.New("invalid escape sequence: \\" + string(l.peek()))
             }
@@ -534,7 +540,7 @@ func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine,
                 return "", fmt.Errorf("unescaped control character %U", r)
             }
             l.next()
-            growingString += string(r)
+            sb.WriteRune(r)
         }

         if l.peek() == eof {
@@ -769,19 +775,19 @@ func init() {
     // /!\ also matches the empty string
     //
     // Example matches:
-    //1979-05-27T07:32:00Z
-    //1979-05-27T00:32:00-07:00
-    //1979-05-27T00:32:00.999999-07:00
-    //1979-05-27 07:32:00Z
-    //1979-05-27 00:32:00-07:00
-    //1979-05-27 00:32:00.999999-07:00
-    //1979-05-27T07:32:00
-    //1979-05-27T00:32:00.999999
-    //1979-05-27 07:32:00
-    //1979-05-27 00:32:00.999999
-    //1979-05-27
-    //07:32:00
-    //00:32:00.999999
+    // 1979-05-27T07:32:00Z
+    // 1979-05-27T00:32:00-07:00
+    // 1979-05-27T00:32:00.999999-07:00
+    // 1979-05-27 07:32:00Z
+    // 1979-05-27 00:32:00-07:00
+    // 1979-05-27 00:32:00.999999-07:00
+    // 1979-05-27T07:32:00
+    // 1979-05-27T00:32:00.999999
+    // 1979-05-27 07:32:00
+    // 1979-05-27 00:32:00.999999
+    // 1979-05-27
+    // 07:32:00
+    // 00:32:00.999999
     dateRegexp = regexp.MustCompile(`^(?:\d{1,4}-\d{2}-\d{2})?(?:[T ]?\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})?)?`)
 }
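Every hunk in this vendored diff applies the same refactor: the lexer's repeated string concatenation (growingString += ...) is replaced by a strings.Builder, which appends into a reusable buffer via WriteString/WriteRune and materializes the result once with String(), instead of allocating a new string on each +=. A minimal, self-contained sketch of that pattern follows (illustrative only; buildDottedKey is a hypothetical helper, not code from this vendor tree):

package main

import (
    "fmt"
    "strings"
)

// buildDottedKey joins key parts with dots using strings.Builder,
// mirroring the += to WriteString/WriteRune change made in lexer.go above.
func buildDottedKey(parts []string) string {
    var sb strings.Builder
    for i, p := range parts {
        if i > 0 {
            sb.WriteRune('.') // append a single rune, like sb.WriteRune(r) in lexKey
        }
        sb.WriteString(p) // append a string fragment without an intermediate allocation
    }
    return sb.String() // build the final string once, as lexKey now does with sb.String()
}

func main() {
    fmt.Println(buildDottedKey([]string{"servers", "alpha", "ip"})) // servers.alpha.ip
}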