Diffstat (limited to 'vendor/github.com/d5/tengo/v2/parser')
-rw-r--r--  vendor/github.com/d5/tengo/v2/parser/ast.go            69
-rw-r--r--  vendor/github.com/d5/tengo/v2/parser/expr.go          597
-rw-r--r--  vendor/github.com/d5/tengo/v2/parser/file.go           29
-rw-r--r--  vendor/github.com/d5/tengo/v2/parser/opcodes.go       156
-rw-r--r--  vendor/github.com/d5/tengo/v2/parser/parser.go       1196
-rw-r--r--  vendor/github.com/d5/tengo/v2/parser/pos.go            12
-rw-r--r--  vendor/github.com/d5/tengo/v2/parser/scanner.go       689
-rw-r--r--  vendor/github.com/d5/tengo/v2/parser/source_file.go   231
-rw-r--r--  vendor/github.com/d5/tengo/v2/parser/stmt.go          349
9 files changed, 3328 insertions, 0 deletions
diff --git a/vendor/github.com/d5/tengo/v2/parser/ast.go b/vendor/github.com/d5/tengo/v2/parser/ast.go
new file mode 100644
index 00000000..8c2f7c07
--- /dev/null
+++ b/vendor/github.com/d5/tengo/v2/parser/ast.go
@@ -0,0 +1,69 @@
+package parser
+
+import (
+ "strings"
+)
+
+const (
+ nullRep = "<null>"
+)
+
+// Node represents a node in the AST.
+type Node interface {
+ // Pos returns the position of first character belonging to the node.
+ Pos() Pos
+ // End returns the position of first character immediately after the node.
+ End() Pos
+ // String returns a string representation of the node.
+ String() string
+}
+
+// IdentList represents a list of identifiers.
+type IdentList struct {
+ LParen Pos
+ VarArgs bool
+ List []*Ident
+ RParen Pos
+}
+
+// Pos returns the position of first character belonging to the node.
+func (n *IdentList) Pos() Pos {
+ if n.LParen.IsValid() {
+ return n.LParen
+ }
+ if len(n.List) > 0 {
+ return n.List[0].Pos()
+ }
+ return NoPos
+}
+
+// End returns the position of first character immediately after the node.
+func (n *IdentList) End() Pos {
+ if n.RParen.IsValid() {
+ return n.RParen + 1
+ }
+ if l := len(n.List); l > 0 {
+ return n.List[l-1].End()
+ }
+ return NoPos
+}
+
+// NumFields returns the number of fields.
+func (n *IdentList) NumFields() int {
+ if n == nil {
+ return 0
+ }
+ return len(n.List)
+}
+
+func (n *IdentList) String() string {
+ var list []string
+ for i, e := range n.List {
+ if n.VarArgs && i == len(n.List)-1 {
+ list = append(list, "..."+e.String())
+ } else {
+ list = append(list, e.String())
+ }
+ }
+ return "(" + strings.Join(list, ", ") + ")"
+}
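
The ast.go file above defines the Node contract (Pos, End, String) that every AST type in this package implements. As a quick illustration — a hypothetical, hand-written sketch that is not part of the vendored diff and simply imports the package by its canonical path — an IdentList can be built directly and inspected through those three methods:

package main

import (
    "fmt"

    "github.com/d5/tengo/v2/parser"
)

func main() {
    // A variadic parameter list "(a, ...b)"; the identifiers start at offsets 1 and 4,
    // and no parenthesis positions are recorded, so Pos/End fall back to the idents.
    list := &parser.IdentList{
        VarArgs: true,
        List: []*parser.Ident{
            {Name: "a", NamePos: 1},
            {Name: "b", NamePos: 4},
        },
    }
    fmt.Println(list.String())          // (a, ...b)
    fmt.Println(list.Pos(), list.End()) // 1 5 (End is one past the last character)
    fmt.Println(list.NumFields())       // 2
}
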
diff --git a/vendor/github.com/d5/tengo/v2/parser/expr.go b/vendor/github.com/d5/tengo/v2/parser/expr.go
new file mode 100644
index 00000000..71e5155b
--- /dev/null
+++ b/vendor/github.com/d5/tengo/v2/parser/expr.go
@@ -0,0 +1,597 @@
+package parser
+
+import (
+ "strings"
+
+ "github.com/d5/tengo/v2/token"
+)
+
+// Expr represents an expression node in the AST.
+type Expr interface {
+ Node
+ exprNode()
+}
+
+// ArrayLit represents an array literal.
+type ArrayLit struct {
+ Elements []Expr
+ LBrack Pos
+ RBrack Pos
+}
+
+func (e *ArrayLit) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *ArrayLit) Pos() Pos {
+ return e.LBrack
+}
+
+// End returns the position of first character immediately after the node.
+func (e *ArrayLit) End() Pos {
+ return e.RBrack + 1
+}
+
+func (e *ArrayLit) String() string {
+ var elements []string
+ for _, m := range e.Elements {
+ elements = append(elements, m.String())
+ }
+ return "[" + strings.Join(elements, ", ") + "]"
+}
+
+// BadExpr represents a bad expression.
+type BadExpr struct {
+ From Pos
+ To Pos
+}
+
+func (e *BadExpr) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *BadExpr) Pos() Pos {
+ return e.From
+}
+
+// End returns the position of first character immediately after the node.
+func (e *BadExpr) End() Pos {
+ return e.To
+}
+
+func (e *BadExpr) String() string {
+ return "<bad expression>"
+}
+
+// BinaryExpr represents a binary operator expression.
+type BinaryExpr struct {
+ LHS Expr
+ RHS Expr
+ Token token.Token
+ TokenPos Pos
+}
+
+func (e *BinaryExpr) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *BinaryExpr) Pos() Pos {
+ return e.LHS.Pos()
+}
+
+// End returns the position of first character immediately after the node.
+func (e *BinaryExpr) End() Pos {
+ return e.RHS.End()
+}
+
+func (e *BinaryExpr) String() string {
+ return "(" + e.LHS.String() + " " + e.Token.String() +
+ " " + e.RHS.String() + ")"
+}
+
+// BoolLit represents a boolean literal.
+type BoolLit struct {
+ Value bool
+ ValuePos Pos
+ Literal string
+}
+
+func (e *BoolLit) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *BoolLit) Pos() Pos {
+ return e.ValuePos
+}
+
+// End returns the position of first character immediately after the node.
+func (e *BoolLit) End() Pos {
+ return Pos(int(e.ValuePos) + len(e.Literal))
+}
+
+func (e *BoolLit) String() string {
+ return e.Literal
+}
+
+// CallExpr represents a function call expression.
+type CallExpr struct {
+ Func Expr
+ LParen Pos
+ Args []Expr
+ RParen Pos
+}
+
+func (e *CallExpr) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *CallExpr) Pos() Pos {
+ return e.Func.Pos()
+}
+
+// End returns the position of first character immediately after the node.
+func (e *CallExpr) End() Pos {
+ return e.RParen + 1
+}
+
+func (e *CallExpr) String() string {
+ var args []string
+ for _, e := range e.Args {
+ args = append(args, e.String())
+ }
+ return e.Func.String() + "(" + strings.Join(args, ", ") + ")"
+}
+
+// CharLit represents a character literal.
+type CharLit struct {
+ Value rune
+ ValuePos Pos
+ Literal string
+}
+
+func (e *CharLit) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *CharLit) Pos() Pos {
+ return e.ValuePos
+}
+
+// End returns the position of first character immediately after the node.
+func (e *CharLit) End() Pos {
+ return Pos(int(e.ValuePos) + len(e.Literal))
+}
+
+func (e *CharLit) String() string {
+ return e.Literal
+}
+
+// CondExpr represents a ternary conditional expression.
+type CondExpr struct {
+ Cond Expr
+ True Expr
+ False Expr
+ QuestionPos Pos
+ ColonPos Pos
+}
+
+func (e *CondExpr) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *CondExpr) Pos() Pos {
+ return e.Cond.Pos()
+}
+
+// End returns the position of first character immediately after the node.
+func (e *CondExpr) End() Pos {
+ return e.False.End()
+}
+
+func (e *CondExpr) String() string {
+ return "(" + e.Cond.String() + " ? " + e.True.String() +
+ " : " + e.False.String() + ")"
+}
+
+// ErrorExpr represents an error expression
+type ErrorExpr struct {
+ Expr Expr
+ ErrorPos Pos
+ LParen Pos
+ RParen Pos
+}
+
+func (e *ErrorExpr) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *ErrorExpr) Pos() Pos {
+ return e.ErrorPos
+}
+
+// End returns the position of first character immediately after the node.
+func (e *ErrorExpr) End() Pos {
+ return e.RParen
+}
+
+func (e *ErrorExpr) String() string {
+ return "error(" + e.Expr.String() + ")"
+}
+
+// FloatLit represents a floating point literal.
+type FloatLit struct {
+ Value float64
+ ValuePos Pos
+ Literal string
+}
+
+func (e *FloatLit) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *FloatLit) Pos() Pos {
+ return e.ValuePos
+}
+
+// End returns the position of first character immediately after the node.
+func (e *FloatLit) End() Pos {
+ return Pos(int(e.ValuePos) + len(e.Literal))
+}
+
+func (e *FloatLit) String() string {
+ return e.Literal
+}
+
+// FuncLit represents a function literal.
+type FuncLit struct {
+ Type *FuncType
+ Body *BlockStmt
+}
+
+func (e *FuncLit) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *FuncLit) Pos() Pos {
+ return e.Type.Pos()
+}
+
+// End returns the position of first character immediately after the node.
+func (e *FuncLit) End() Pos {
+ return e.Body.End()
+}
+
+func (e *FuncLit) String() string {
+ return "func" + e.Type.Params.String() + " " + e.Body.String()
+}
+
+// FuncType represents a function type definition.
+type FuncType struct {
+ FuncPos Pos
+ Params *IdentList
+}
+
+func (e *FuncType) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *FuncType) Pos() Pos {
+ return e.FuncPos
+}
+
+// End returns the position of first character immediately after the node.
+func (e *FuncType) End() Pos {
+ return e.Params.End()
+}
+
+func (e *FuncType) String() string {
+ return "func" + e.Params.String()
+}
+
+// Ident represents an identifier.
+type Ident struct {
+ Name string
+ NamePos Pos
+}
+
+func (e *Ident) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *Ident) Pos() Pos {
+ return e.NamePos
+}
+
+// End returns the position of first character immediately after the node.
+func (e *Ident) End() Pos {
+ return Pos(int(e.NamePos) + len(e.Name))
+}
+
+func (e *Ident) String() string {
+ if e != nil {
+ return e.Name
+ }
+ return nullRep
+}
+
+// ImmutableExpr represents an immutable expression
+type ImmutableExpr struct {
+ Expr Expr
+ ErrorPos Pos
+ LParen Pos
+ RParen Pos
+}
+
+func (e *ImmutableExpr) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *ImmutableExpr) Pos() Pos {
+ return e.ErrorPos
+}
+
+// End returns the position of first character immediately after the node.
+func (e *ImmutableExpr) End() Pos {
+ return e.RParen
+}
+
+func (e *ImmutableExpr) String() string {
+ return "immutable(" + e.Expr.String() + ")"
+}
+
+// ImportExpr represents an import expression
+type ImportExpr struct {
+ ModuleName string
+ Token token.Token
+ TokenPos Pos
+}
+
+func (e *ImportExpr) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *ImportExpr) Pos() Pos {
+ return e.TokenPos
+}
+
+// End returns the position of first character immediately after the node.
+func (e *ImportExpr) End() Pos {
+ // import("moduleName")
+ return Pos(int(e.TokenPos) + 10 + len(e.ModuleName))
+}
+
+func (e *ImportExpr) String() string {
+ return `import("` + e.ModuleName + `")`
+}
+
+// IndexExpr represents an index expression.
+type IndexExpr struct {
+ Expr Expr
+ LBrack Pos
+ Index Expr
+ RBrack Pos
+}
+
+func (e *IndexExpr) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *IndexExpr) Pos() Pos {
+ return e.Expr.Pos()
+}
+
+// End returns the position of first character immediately after the node.
+func (e *IndexExpr) End() Pos {
+ return e.RBrack + 1
+}
+
+func (e *IndexExpr) String() string {
+ var index string
+ if e.Index != nil {
+ index = e.Index.String()
+ }
+ return e.Expr.String() + "[" + index + "]"
+}
+
+// IntLit represents an integer literal.
+type IntLit struct {
+ Value int64
+ ValuePos Pos
+ Literal string
+}
+
+func (e *IntLit) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *IntLit) Pos() Pos {
+ return e.ValuePos
+}
+
+// End returns the position of first character immediately after the node.
+func (e *IntLit) End() Pos {
+ return Pos(int(e.ValuePos) + len(e.Literal))
+}
+
+func (e *IntLit) String() string {
+ return e.Literal
+}
+
+// MapElementLit represents a map element.
+type MapElementLit struct {
+ Key string
+ KeyPos Pos
+ ColonPos Pos
+ Value Expr
+}
+
+func (e *MapElementLit) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *MapElementLit) Pos() Pos {
+ return e.KeyPos
+}
+
+// End returns the position of first character immediately after the node.
+func (e *MapElementLit) End() Pos {
+ return e.Value.End()
+}
+
+func (e *MapElementLit) String() string {
+ return e.Key + ": " + e.Value.String()
+}
+
+// MapLit represents a map literal.
+type MapLit struct {
+ LBrace Pos
+ Elements []*MapElementLit
+ RBrace Pos
+}
+
+func (e *MapLit) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *MapLit) Pos() Pos {
+ return e.LBrace
+}
+
+// End returns the position of first character immediately after the node.
+func (e *MapLit) End() Pos {
+ return e.RBrace + 1
+}
+
+func (e *MapLit) String() string {
+ var elements []string
+ for _, m := range e.Elements {
+ elements = append(elements, m.String())
+ }
+ return "{" + strings.Join(elements, ", ") + "}"
+}
+
+// ParenExpr represents a parenthesis wrapped expression.
+type ParenExpr struct {
+ Expr Expr
+ LParen Pos
+ RParen Pos
+}
+
+func (e *ParenExpr) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *ParenExpr) Pos() Pos {
+ return e.LParen
+}
+
+// End returns the position of first character immediately after the node.
+func (e *ParenExpr) End() Pos {
+ return e.RParen + 1
+}
+
+func (e *ParenExpr) String() string {
+ return "(" + e.Expr.String() + ")"
+}
+
+// SelectorExpr represents a selector expression.
+type SelectorExpr struct {
+ Expr Expr
+ Sel Expr
+}
+
+func (e *SelectorExpr) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *SelectorExpr) Pos() Pos {
+ return e.Expr.Pos()
+}
+
+// End returns the position of first character immediately after the node.
+func (e *SelectorExpr) End() Pos {
+ return e.Sel.End()
+}
+
+func (e *SelectorExpr) String() string {
+ return e.Expr.String() + "." + e.Sel.String()
+}
+
+// SliceExpr represents a slice expression.
+type SliceExpr struct {
+ Expr Expr
+ LBrack Pos
+ Low Expr
+ High Expr
+ RBrack Pos
+}
+
+func (e *SliceExpr) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *SliceExpr) Pos() Pos {
+ return e.Expr.Pos()
+}
+
+// End returns the position of first character immediately after the node.
+func (e *SliceExpr) End() Pos {
+ return e.RBrack + 1
+}
+
+func (e *SliceExpr) String() string {
+ var low, high string
+ if e.Low != nil {
+ low = e.Low.String()
+ }
+ if e.High != nil {
+ high = e.High.String()
+ }
+ return e.Expr.String() + "[" + low + ":" + high + "]"
+}
+
+// StringLit represents a string literal.
+type StringLit struct {
+ Value string
+ ValuePos Pos
+ Literal string
+}
+
+func (e *StringLit) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *StringLit) Pos() Pos {
+ return e.ValuePos
+}
+
+// End returns the position of first character immediately after the node.
+func (e *StringLit) End() Pos {
+ return Pos(int(e.ValuePos) + len(e.Literal))
+}
+
+func (e *StringLit) String() string {
+ return e.Literal
+}
+
+// UnaryExpr represents a unary operator expression.
+type UnaryExpr struct {
+ Expr Expr
+ Token token.Token
+ TokenPos Pos
+}
+
+func (e *UnaryExpr) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *UnaryExpr) Pos() Pos {
+ return e.Expr.Pos()
+}
+
+// End returns the position of first character immediately after the node.
+func (e *UnaryExpr) End() Pos {
+ return e.Expr.End()
+}
+
+func (e *UnaryExpr) String() string {
+ return "(" + e.Token.String() + e.Expr.String() + ")"
+}
+
+// UndefinedLit represents an undefined literal.
+type UndefinedLit struct {
+ TokenPos Pos
+}
+
+func (e *UndefinedLit) exprNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (e *UndefinedLit) Pos() Pos {
+ return e.TokenPos
+}
+
+// End returns the position of first character immediately after the node.
+func (e *UndefinedLit) End() Pos {
+ return e.TokenPos + 9 // len(undefined) == 9
+}
+
+func (e *UndefinedLit) String() string {
+ return "undefined"
+}
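
Each expression node above renders itself through String, which makes hand-built trees easy to inspect. A minimal, hypothetical sketch (not part of the diff; import paths assumed from the vendored layout):

package main

import (
    "fmt"

    "github.com/d5/tengo/v2/parser"
    "github.com/d5/tengo/v2/token"
)

func main() {
    // Build "1 + 2" by hand, then use it as the index of arr[...].
    sum := &parser.BinaryExpr{
        LHS:   &parser.IntLit{Value: 1, Literal: "1", ValuePos: 1},
        RHS:   &parser.IntLit{Value: 2, Literal: "2", ValuePos: 5},
        Token: token.Add,
    }
    idx := &parser.IndexExpr{
        Expr:  &parser.Ident{Name: "arr", NamePos: 10},
        Index: sum,
    }
    fmt.Println(sum.String()) // (1 + 2)
    fmt.Println(idx.String()) // arr[(1 + 2)]
}
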
diff --git a/vendor/github.com/d5/tengo/v2/parser/file.go b/vendor/github.com/d5/tengo/v2/parser/file.go
new file mode 100644
index 00000000..7cf50fea
--- /dev/null
+++ b/vendor/github.com/d5/tengo/v2/parser/file.go
@@ -0,0 +1,29 @@
+package parser
+
+import (
+ "strings"
+)
+
+// File represents a file unit.
+type File struct {
+ InputFile *SourceFile
+ Stmts []Stmt
+}
+
+// Pos returns the position of first character belonging to the node.
+func (n *File) Pos() Pos {
+ return Pos(n.InputFile.Base)
+}
+
+// End returns the position of first character immediately after the node.
+func (n *File) End() Pos {
+ return Pos(n.InputFile.Base + n.InputFile.Size)
+}
+
+func (n *File) String() string {
+ var stmts []string
+ for _, e := range n.Stmts {
+ stmts = append(stmts, e.String())
+ }
+ return strings.Join(stmts, "; ")
+}
diff --git a/vendor/github.com/d5/tengo/v2/parser/opcodes.go b/vendor/github.com/d5/tengo/v2/parser/opcodes.go
new file mode 100644
index 00000000..a4fbfbaf
--- /dev/null
+++ b/vendor/github.com/d5/tengo/v2/parser/opcodes.go
@@ -0,0 +1,156 @@
+package parser
+
+// Opcode represents a single byte operation code.
+type Opcode = byte
+
+// List of opcodes
+const (
+ OpConstant Opcode = iota // Load constant
+ OpBComplement // bitwise complement
+ OpPop // Pop
+ OpTrue // Push true
+ OpFalse // Push false
+ OpEqual // Equal ==
+ OpNotEqual // Not equal !=
+ OpMinus // Minus -
+ OpLNot // Logical not !
+ OpJumpFalsy // Jump if falsy
+ OpAndJump // Logical AND jump
+ OpOrJump // Logical OR jump
+ OpJump // Jump
+ OpNull // Push null
+ OpArray // Array object
+ OpMap // Map object
+ OpError // Error object
+ OpImmutable // Immutable object
+ OpIndex // Index operation
+ OpSliceIndex // Slice operation
+ OpCall // Call function
+ OpReturn // Return
+ OpGetGlobal // Get global variable
+ OpSetGlobal // Set global variable
+ OpSetSelGlobal // Set global variable using selectors
+ OpGetLocal // Get local variable
+ OpSetLocal // Set local variable
+ OpDefineLocal // Define local variable
+ OpSetSelLocal // Set local variable using selectors
+ OpGetFreePtr // Get free variable pointer object
+ OpGetFree // Get free variables
+ OpSetFree // Set free variables
+ OpGetLocalPtr // Get local variable as a pointer
+ OpSetSelFree // Set free variables using selectors
+ OpGetBuiltin // Get builtin function
+ OpClosure // Push closure
+ OpIteratorInit // Iterator init
+ OpIteratorNext // Iterator next
+ OpIteratorKey // Iterator key
+ OpIteratorValue // Iterator value
+ OpBinaryOp // Binary operation
+ OpSuspend // Suspend VM
+)
+
+// OpcodeNames are the string representations of the opcodes.
+var OpcodeNames = [...]string{
+ OpConstant: "CONST",
+ OpPop: "POP",
+ OpTrue: "TRUE",
+ OpFalse: "FALSE",
+ OpBComplement: "NEG",
+ OpEqual: "EQL",
+ OpNotEqual: "NEQ",
+ OpMinus: "NEG",
+ OpLNot: "NOT",
+ OpJumpFalsy: "JMPF",
+ OpAndJump: "ANDJMP",
+ OpOrJump: "ORJMP",
+ OpJump: "JMP",
+ OpNull: "NULL",
+ OpGetGlobal: "GETG",
+ OpSetGlobal: "SETG",
+ OpSetSelGlobal: "SETSG",
+ OpArray: "ARR",
+ OpMap: "MAP",
+ OpError: "ERROR",
+ OpImmutable: "IMMUT",
+ OpIndex: "INDEX",
+ OpSliceIndex: "SLICE",
+ OpCall: "CALL",
+ OpReturn: "RET",
+ OpGetLocal: "GETL",
+ OpSetLocal: "SETL",
+ OpDefineLocal: "DEFL",
+ OpSetSelLocal: "SETSL",
+ OpGetBuiltin: "BUILTIN",
+ OpClosure: "CLOSURE",
+ OpGetFreePtr: "GETFP",
+ OpGetFree: "GETF",
+ OpSetFree: "SETF",
+ OpGetLocalPtr: "GETLP",
+ OpSetSelFree: "SETSF",
+ OpIteratorInit: "ITER",
+ OpIteratorNext: "ITNXT",
+ OpIteratorKey: "ITKEY",
+ OpIteratorValue: "ITVAL",
+ OpBinaryOp: "BINARYOP",
+ OpSuspend: "SUSPEND",
+}
+
+// OpcodeOperands lists the operand widths (in bytes) for each opcode.
+var OpcodeOperands = [...][]int{
+ OpConstant: {2},
+ OpPop: {},
+ OpTrue: {},
+ OpFalse: {},
+ OpBComplement: {},
+ OpEqual: {},
+ OpNotEqual: {},
+ OpMinus: {},
+ OpLNot: {},
+ OpJumpFalsy: {2},
+ OpAndJump: {2},
+ OpOrJump: {2},
+ OpJump: {2},
+ OpNull: {},
+ OpGetGlobal: {2},
+ OpSetGlobal: {2},
+ OpSetSelGlobal: {2, 1},
+ OpArray: {2},
+ OpMap: {2},
+ OpError: {},
+ OpImmutable: {},
+ OpIndex: {},
+ OpSliceIndex: {},
+ OpCall: {1},
+ OpReturn: {1},
+ OpGetLocal: {1},
+ OpSetLocal: {1},
+ OpDefineLocal: {1},
+ OpSetSelLocal: {1, 1},
+ OpGetBuiltin: {1},
+ OpClosure: {2, 1},
+ OpGetFreePtr: {1},
+ OpGetFree: {1},
+ OpSetFree: {1},
+ OpGetLocalPtr: {1},
+ OpSetSelFree: {1, 1},
+ OpIteratorInit: {},
+ OpIteratorNext: {},
+ OpIteratorKey: {},
+ OpIteratorValue: {},
+ OpBinaryOp: {1},
+ OpSuspend: {},
+}
+
+// ReadOperands reads operands from the bytecode.
+func ReadOperands(numOperands []int, ins []byte) (operands []int, offset int) {
+ for _, width := range numOperands {
+ switch width {
+ case 1:
+ operands = append(operands, int(ins[offset]))
+ case 2:
+ operands = append(operands, int(ins[offset+1])|int(ins[offset])<<8)
+ }
+ offset += width
+ }
+ return
+}
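
OpcodeOperands and ReadOperands together define the instruction encoding: each listed width is an operand size in bytes, and 2-byte operands are big-endian. A small sketch with made-up instruction bytes (not part of the diff):

package main

import (
    "fmt"

    "github.com/d5/tengo/v2/parser"
)

func main() {
    // OpConstant has a single 2-byte operand: 0x01<<8 | 0x02 == 258.
    ops, n := parser.ReadOperands(parser.OpcodeOperands[parser.OpConstant], []byte{0x01, 0x02})
    fmt.Println(ops, n) // [258] 2

    // OpSetSelGlobal has a 2-byte operand followed by a 1-byte operand.
    ops, n = parser.ReadOperands(parser.OpcodeOperands[parser.OpSetSelGlobal], []byte{0x00, 0x07, 0x03})
    fmt.Println(ops, n) // [7 3] 3
}
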
diff --git a/vendor/github.com/d5/tengo/v2/parser/parser.go b/vendor/github.com/d5/tengo/v2/parser/parser.go
new file mode 100644
index 00000000..501a9106
--- /dev/null
+++ b/vendor/github.com/d5/tengo/v2/parser/parser.go
@@ -0,0 +1,1196 @@
+package parser
+
+import (
+ "fmt"
+ "io"
+ "sort"
+ "strconv"
+
+ "github.com/d5/tengo/v2/token"
+)
+
+type bailout struct{}
+
+var stmtStart = map[token.Token]bool{
+ token.Break: true,
+ token.Continue: true,
+ token.For: true,
+ token.If: true,
+ token.Return: true,
+ token.Export: true,
+}
+
+// Error represents a parser error.
+type Error struct {
+ Pos SourceFilePos
+ Msg string
+}
+
+func (e Error) Error() string {
+ if e.Pos.Filename != "" || e.Pos.IsValid() {
+ return fmt.Sprintf("Parse Error: %s\n\tat %s", e.Msg, e.Pos)
+ }
+ return fmt.Sprintf("Parse Error: %s", e.Msg)
+}
+
+// ErrorList is a collection of parser errors.
+type ErrorList []*Error
+
+// Add adds a new parser error to the collection.
+func (p *ErrorList) Add(pos SourceFilePos, msg string) {
+ *p = append(*p, &Error{pos, msg})
+}
+
+// Len returns the number of elements in the collection.
+func (p ErrorList) Len() int {
+ return len(p)
+}
+
+func (p ErrorList) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
+
+func (p ErrorList) Less(i, j int) bool {
+ e := &p[i].Pos
+ f := &p[j].Pos
+
+ if e.Filename != f.Filename {
+ return e.Filename < f.Filename
+ }
+ if e.Line != f.Line {
+ return e.Line < f.Line
+ }
+ if e.Column != f.Column {
+ return e.Column < f.Column
+ }
+ return p[i].Msg < p[j].Msg
+}
+
+// Sort sorts the collection.
+func (p ErrorList) Sort() {
+ sort.Sort(p)
+}
+
+func (p ErrorList) Error() string {
+ switch len(p) {
+ case 0:
+ return "no errors"
+ case 1:
+ return p[0].Error()
+ }
+ return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1)
+}
+
+// Err returns an error.
+func (p ErrorList) Err() error {
+ if len(p) == 0 {
+ return nil
+ }
+ return p
+}
+
+// Parser parses the Tengo source files. It's based on Go's parser
+// implementation.
+type Parser struct {
+ file *SourceFile
+ errors ErrorList
+ scanner *Scanner
+ pos Pos
+ token token.Token
+ tokenLit string
+ exprLevel int // < 0: in control clause, >= 0: in expression
+ syncPos Pos // last sync position
+ syncCount int // number of advance calls without progress
+ trace bool
+ indent int
+ traceOut io.Writer
+}
+
+// NewParser creates a Parser.
+func NewParser(file *SourceFile, src []byte, trace io.Writer) *Parser {
+ p := &Parser{
+ file: file,
+ trace: trace != nil,
+ traceOut: trace,
+ }
+ p.scanner = NewScanner(p.file, src,
+ func(pos SourceFilePos, msg string) {
+ p.errors.Add(pos, msg)
+ }, 0)
+ p.next()
+ return p
+}
+
+// ParseFile parses the source and returns an AST file unit.
+func (p *Parser) ParseFile() (file *File, err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if _, ok := e.(bailout); !ok {
+ panic(e)
+ }
+ }
+
+ p.errors.Sort()
+ err = p.errors.Err()
+ }()
+
+ if p.trace {
+ defer untracep(tracep(p, "File"))
+ }
+
+ if p.errors.Len() > 0 {
+ return nil, p.errors.Err()
+ }
+
+ stmts := p.parseStmtList()
+ if p.errors.Len() > 0 {
+ return nil, p.errors.Err()
+ }
+
+ file = &File{
+ InputFile: p.file,
+ Stmts: stmts,
+ }
+ return
+}
+
+func (p *Parser) parseExpr() Expr {
+ if p.trace {
+ defer untracep(tracep(p, "Expression"))
+ }
+
+ expr := p.parseBinaryExpr(token.LowestPrec + 1)
+
+ // ternary conditional expression
+ if p.token == token.Question {
+ return p.parseCondExpr(expr)
+ }
+ return expr
+}
+
+func (p *Parser) parseBinaryExpr(prec1 int) Expr {
+ if p.trace {
+ defer untracep(tracep(p, "BinaryExpression"))
+ }
+
+ x := p.parseUnaryExpr()
+
+ for {
+ op, prec := p.token, p.token.Precedence()
+ if prec < prec1 {
+ return x
+ }
+
+ pos := p.expect(op)
+
+ y := p.parseBinaryExpr(prec + 1)
+
+ x = &BinaryExpr{
+ LHS: x,
+ RHS: y,
+ Token: op,
+ TokenPos: pos,
+ }
+ }
+}
+
+func (p *Parser) parseCondExpr(cond Expr) Expr {
+ questionPos := p.expect(token.Question)
+ trueExpr := p.parseExpr()
+ colonPos := p.expect(token.Colon)
+ falseExpr := p.parseExpr()
+
+ return &CondExpr{
+ Cond: cond,
+ True: trueExpr,
+ False: falseExpr,
+ QuestionPos: questionPos,
+ ColonPos: colonPos,
+ }
+}
+
+func (p *Parser) parseUnaryExpr() Expr {
+ if p.trace {
+ defer untracep(tracep(p, "UnaryExpression"))
+ }
+
+ switch p.token {
+ case token.Add, token.Sub, token.Not, token.Xor:
+ pos, op := p.pos, p.token
+ p.next()
+ x := p.parseUnaryExpr()
+ return &UnaryExpr{
+ Token: op,
+ TokenPos: pos,
+ Expr: x,
+ }
+ }
+ return p.parsePrimaryExpr()
+}
+
+func (p *Parser) parsePrimaryExpr() Expr {
+ if p.trace {
+ defer untracep(tracep(p, "PrimaryExpression"))
+ }
+
+ x := p.parseOperand()
+
+L:
+ for {
+ switch p.token {
+ case token.Period:
+ p.next()
+
+ switch p.token {
+ case token.Ident:
+ x = p.parseSelector(x)
+ default:
+ pos := p.pos
+ p.errorExpected(pos, "selector")
+ p.advance(stmtStart)
+ return &BadExpr{From: pos, To: p.pos}
+ }
+ case token.LBrack:
+ x = p.parseIndexOrSlice(x)
+ case token.LParen:
+ x = p.parseCall(x)
+ default:
+ break L
+ }
+ }
+ return x
+}
+
+func (p *Parser) parseCall(x Expr) *CallExpr {
+ if p.trace {
+ defer untracep(tracep(p, "Call"))
+ }
+
+ lparen := p.expect(token.LParen)
+ p.exprLevel++
+
+ var list []Expr
+ for p.token != token.RParen && p.token != token.EOF {
+ list = append(list, p.parseExpr())
+
+ if !p.expectComma(token.RParen, "call argument") {
+ break
+ }
+ }
+
+ p.exprLevel--
+ rparen := p.expect(token.RParen)
+ return &CallExpr{
+ Func: x,
+ LParen: lparen,
+ RParen: rparen,
+ Args: list,
+ }
+}
+
+func (p *Parser) expectComma(closing token.Token, want string) bool {
+ if p.token == token.Comma {
+ p.next()
+
+ if p.token == closing {
+ p.errorExpected(p.pos, want)
+ return false
+ }
+ return true
+ }
+
+ if p.token == token.Semicolon && p.tokenLit == "\n" {
+ p.next()
+ }
+ return false
+}
+
+func (p *Parser) parseIndexOrSlice(x Expr) Expr {
+ if p.trace {
+ defer untracep(tracep(p, "IndexOrSlice"))
+ }
+
+ lbrack := p.expect(token.LBrack)
+ p.exprLevel++
+
+ var index [2]Expr
+ if p.token != token.Colon {
+ index[0] = p.parseExpr()
+ }
+ numColons := 0
+ if p.token == token.Colon {
+ numColons++
+ p.next()
+
+ if p.token != token.RBrack && p.token != token.EOF {
+ index[1] = p.parseExpr()
+ }
+ }
+
+ p.exprLevel--
+ rbrack := p.expect(token.RBrack)
+
+ if numColons > 0 {
+ // slice expression
+ return &SliceExpr{
+ Expr: x,
+ LBrack: lbrack,
+ RBrack: rbrack,
+ Low: index[0],
+ High: index[1],
+ }
+ }
+ return &IndexExpr{
+ Expr: x,
+ LBrack: lbrack,
+ RBrack: rbrack,
+ Index: index[0],
+ }
+}
+
+func (p *Parser) parseSelector(x Expr) Expr {
+ if p.trace {
+ defer untracep(tracep(p, "Selector"))
+ }
+
+ sel := p.parseIdent()
+ return &SelectorExpr{Expr: x, Sel: &StringLit{
+ Value: sel.Name,
+ ValuePos: sel.NamePos,
+ Literal: sel.Name,
+ }}
+}
+
+func (p *Parser) parseOperand() Expr {
+ if p.trace {
+ defer untracep(tracep(p, "Operand"))
+ }
+
+ switch p.token {
+ case token.Ident:
+ return p.parseIdent()
+ case token.Int:
+ v, _ := strconv.ParseInt(p.tokenLit, 10, 64)
+ x := &IntLit{
+ Value: v,
+ ValuePos: p.pos,
+ Literal: p.tokenLit,
+ }
+ p.next()
+ return x
+ case token.Float:
+ v, _ := strconv.ParseFloat(p.tokenLit, 64)
+ x := &FloatLit{
+ Value: v,
+ ValuePos: p.pos,
+ Literal: p.tokenLit,
+ }
+ p.next()
+ return x
+ case token.Char:
+ return p.parseCharLit()
+ case token.String:
+ v, _ := strconv.Unquote(p.tokenLit)
+ x := &StringLit{
+ Value: v,
+ ValuePos: p.pos,
+ Literal: p.tokenLit,
+ }
+ p.next()
+ return x
+ case token.True:
+ x := &BoolLit{
+ Value: true,
+ ValuePos: p.pos,
+ Literal: p.tokenLit,
+ }
+ p.next()
+ return x
+ case token.False:
+ x := &BoolLit{
+ Value: false,
+ ValuePos: p.pos,
+ Literal: p.tokenLit,
+ }
+ p.next()
+ return x
+ case token.Undefined:
+ x := &UndefinedLit{TokenPos: p.pos}
+ p.next()
+ return x
+ case token.Import:
+ return p.parseImportExpr()
+ case token.LParen:
+ lparen := p.pos
+ p.next()
+ p.exprLevel++
+ x := p.parseExpr()
+ p.exprLevel--
+ rparen := p.expect(token.RParen)
+ return &ParenExpr{
+ LParen: lparen,
+ Expr: x,
+ RParen: rparen,
+ }
+ case token.LBrack: // array literal
+ return p.parseArrayLit()
+ case token.LBrace: // map literal
+ return p.parseMapLit()
+ case token.Func: // function literal
+ return p.parseFuncLit()
+ case token.Error: // error expression
+ return p.parseErrorExpr()
+ case token.Immutable: // immutable expression
+ return p.parseImmutableExpr()
+ }
+
+ pos := p.pos
+ p.errorExpected(pos, "operand")
+ p.advance(stmtStart)
+ return &BadExpr{From: pos, To: p.pos}
+}
+
+func (p *Parser) parseImportExpr() Expr {
+ pos := p.pos
+ p.next()
+ p.expect(token.LParen)
+ if p.token != token.String {
+ p.errorExpected(p.pos, "module name")
+ p.advance(stmtStart)
+ return &BadExpr{From: pos, To: p.pos}
+ }
+
+ // module name
+ moduleName, _ := strconv.Unquote(p.tokenLit)
+ expr := &ImportExpr{
+ ModuleName: moduleName,
+ Token: token.Import,
+ TokenPos: pos,
+ }
+
+ p.next()
+ p.expect(token.RParen)
+ return expr
+}
+
+func (p *Parser) parseCharLit() Expr {
+ if n := len(p.tokenLit); n >= 3 {
+ code, _, _, err := strconv.UnquoteChar(p.tokenLit[1:n-1], '\'')
+ if err == nil {
+ x := &CharLit{
+ Value: code,
+ ValuePos: p.pos,
+ Literal: p.tokenLit,
+ }
+ p.next()
+ return x
+ }
+ }
+
+ pos := p.pos
+ p.error(pos, "illegal char literal")
+ p.next()
+ return &BadExpr{
+ From: pos,
+ To: p.pos,
+ }
+}
+
+func (p *Parser) parseFuncLit() Expr {
+ if p.trace {
+ defer untracep(tracep(p, "FuncLit"))
+ }
+
+ typ := p.parseFuncType()
+ p.exprLevel++
+ body := p.parseBody()
+ p.exprLevel--
+ return &FuncLit{
+ Type: typ,
+ Body: body,
+ }
+}
+
+func (p *Parser) parseArrayLit() Expr {
+ if p.trace {
+ defer untracep(tracep(p, "ArrayLit"))
+ }
+
+ lbrack := p.expect(token.LBrack)
+ p.exprLevel++
+
+ var elements []Expr
+ for p.token != token.RBrack && p.token != token.EOF {
+ elements = append(elements, p.parseExpr())
+
+ if !p.expectComma(token.RBrack, "array element") {
+ break
+ }
+ }
+
+ p.exprLevel--
+ rbrack := p.expect(token.RBrack)
+ return &ArrayLit{
+ Elements: elements,
+ LBrack: lbrack,
+ RBrack: rbrack,
+ }
+}
+
+func (p *Parser) parseErrorExpr() Expr {
+ pos := p.pos
+
+ p.next()
+ lparen := p.expect(token.LParen)
+ value := p.parseExpr()
+ rparen := p.expect(token.RParen)
+ return &ErrorExpr{
+ ErrorPos: pos,
+ Expr: value,
+ LParen: lparen,
+ RParen: rparen,
+ }
+}
+
+func (p *Parser) parseImmutableExpr() Expr {
+ pos := p.pos
+
+ p.next()
+ lparen := p.expect(token.LParen)
+ value := p.parseExpr()
+ rparen := p.expect(token.RParen)
+ return &ImmutableExpr{
+ ErrorPos: pos,
+ Expr: value,
+ LParen: lparen,
+ RParen: rparen,
+ }
+}
+
+func (p *Parser) parseFuncType() *FuncType {
+ if p.trace {
+ defer untracep(tracep(p, "FuncType"))
+ }
+
+ pos := p.expect(token.Func)
+ params := p.parseIdentList()
+ return &FuncType{
+ FuncPos: pos,
+ Params: params,
+ }
+}
+
+func (p *Parser) parseBody() *BlockStmt {
+ if p.trace {
+ defer untracep(tracep(p, "Body"))
+ }
+
+ lbrace := p.expect(token.LBrace)
+ list := p.parseStmtList()
+ rbrace := p.expect(token.RBrace)
+ return &BlockStmt{
+ LBrace: lbrace,
+ RBrace: rbrace,
+ Stmts: list,
+ }
+}
+
+func (p *Parser) parseStmtList() (list []Stmt) {
+ if p.trace {
+ defer untracep(tracep(p, "StatementList"))
+ }
+
+ for p.token != token.RBrace && p.token != token.EOF {
+ list = append(list, p.parseStmt())
+ }
+ return
+}
+
+func (p *Parser) parseIdent() *Ident {
+ pos := p.pos
+ name := "_"
+
+ if p.token == token.Ident {
+ name = p.tokenLit
+ p.next()
+ } else {
+ p.expect(token.Ident)
+ }
+ return &Ident{
+ NamePos: pos,
+ Name: name,
+ }
+}
+
+func (p *Parser) parseIdentList() *IdentList {
+ if p.trace {
+ defer untracep(tracep(p, "IdentList"))
+ }
+
+ var params []*Ident
+ lparen := p.expect(token.LParen)
+ isVarArgs := false
+ if p.token != token.RParen {
+ if p.token == token.Ellipsis {
+ isVarArgs = true
+ p.next()
+ }
+
+ params = append(params, p.parseIdent())
+ for !isVarArgs && p.token == token.Comma {
+ p.next()
+ if p.token == token.Ellipsis {
+ isVarArgs = true
+ p.next()
+ }
+ params = append(params, p.parseIdent())
+ }
+ }
+
+ rparen := p.expect(token.RParen)
+ return &IdentList{
+ LParen: lparen,
+ RParen: rparen,
+ VarArgs: isVarArgs,
+ List: params,
+ }
+}
+
+func (p *Parser) parseStmt() (stmt Stmt) {
+ if p.trace {
+ defer untracep(tracep(p, "Statement"))
+ }
+
+ switch p.token {
+ case // simple statements
+ token.Func, token.Error, token.Immutable, token.Ident, token.Int,
+ token.Float, token.Char, token.String, token.True, token.False,
+ token.Undefined, token.Import, token.LParen, token.LBrace,
+ token.LBrack, token.Add, token.Sub, token.Mul, token.And, token.Xor,
+ token.Not:
+ s := p.parseSimpleStmt(false)
+ p.expectSemi()
+ return s
+ case token.Return:
+ return p.parseReturnStmt()
+ case token.Export:
+ return p.parseExportStmt()
+ case token.If:
+ return p.parseIfStmt()
+ case token.For:
+ return p.parseForStmt()
+ case token.Break, token.Continue:
+ return p.parseBranchStmt(p.token)
+ case token.Semicolon:
+ s := &EmptyStmt{Semicolon: p.pos, Implicit: p.tokenLit == "\n"}
+ p.next()
+ return s
+ case token.RBrace:
+ // semicolon may be omitted before a closing "}"
+ return &EmptyStmt{Semicolon: p.pos, Implicit: true}
+ default:
+ pos := p.pos
+ p.errorExpected(pos, "statement")
+ p.advance(stmtStart)
+ return &BadStmt{From: pos, To: p.pos}
+ }
+}
+
+func (p *Parser) parseForStmt() Stmt {
+ if p.trace {
+ defer untracep(tracep(p, "ForStmt"))
+ }
+
+ pos := p.expect(token.For)
+
+ // for {}
+ if p.token == token.LBrace {
+ body := p.parseBlockStmt()
+ p.expectSemi()
+
+ return &ForStmt{
+ ForPos: pos,
+ Body: body,
+ }
+ }
+
+ prevLevel := p.exprLevel
+ p.exprLevel = -1
+
+ var s1 Stmt
+ if p.token != token.Semicolon { // skipping init
+ s1 = p.parseSimpleStmt(true)
+ }
+
+ // for _ in seq {} or
+ // for value in seq {} or
+ // for key, value in seq {}
+ if forInStmt, isForIn := s1.(*ForInStmt); isForIn {
+ forInStmt.ForPos = pos
+ p.exprLevel = prevLevel
+ forInStmt.Body = p.parseBlockStmt()
+ p.expectSemi()
+ return forInStmt
+ }
+
+ // for init; cond; post {}
+ var s2, s3 Stmt
+ if p.token == token.Semicolon {
+ p.next()
+ if p.token != token.Semicolon {
+ s2 = p.parseSimpleStmt(false) // cond
+ }
+ p.expect(token.Semicolon)
+ if p.token != token.LBrace {
+ s3 = p.parseSimpleStmt(false) // post
+ }
+ } else {
+ // for cond {}
+ s2 = s1
+ s1 = nil
+ }
+
+ // body
+ p.exprLevel = prevLevel
+ body := p.parseBlockStmt()
+ p.expectSemi()
+ cond := p.makeExpr(s2, "condition expression")
+ return &ForStmt{
+ ForPos: pos,
+ Init: s1,
+ Cond: cond,
+ Post: s3,
+ Body: body,
+ }
+}
+
+func (p *Parser) parseBranchStmt(tok token.Token) Stmt {
+ if p.trace {
+ defer untracep(tracep(p, "BranchStmt"))
+ }
+
+ pos := p.expect(tok)
+
+ var label *Ident
+ if p.token == token.Ident {
+ label = p.parseIdent()
+ }
+ p.expectSemi()
+ return &BranchStmt{
+ Token: tok,
+ TokenPos: pos,
+ Label: label,
+ }
+}
+
+func (p *Parser) parseIfStmt() Stmt {
+ if p.trace {
+ defer untracep(tracep(p, "IfStmt"))
+ }
+
+ pos := p.expect(token.If)
+ init, cond := p.parseIfHeader()
+ body := p.parseBlockStmt()
+
+ var elseStmt Stmt
+ if p.token == token.Else {
+ p.next()
+
+ switch p.token {
+ case token.If:
+ elseStmt = p.parseIfStmt()
+ case token.LBrace:
+ elseStmt = p.parseBlockStmt()
+ p.expectSemi()
+ default:
+ p.errorExpected(p.pos, "if or {")
+ elseStmt = &BadStmt{From: p.pos, To: p.pos}
+ }
+ } else {
+ p.expectSemi()
+ }
+ return &IfStmt{
+ IfPos: pos,
+ Init: init,
+ Cond: cond,
+ Body: body,
+ Else: elseStmt,
+ }
+}
+
+func (p *Parser) parseBlockStmt() *BlockStmt {
+ if p.trace {
+ defer untracep(tracep(p, "BlockStmt"))
+ }
+
+ lbrace := p.expect(token.LBrace)
+ list := p.parseStmtList()
+ rbrace := p.expect(token.RBrace)
+ return &BlockStmt{
+ LBrace: lbrace,
+ RBrace: rbrace,
+ Stmts: list,
+ }
+}
+
+func (p *Parser) parseIfHeader() (init Stmt, cond Expr) {
+ if p.token == token.LBrace {
+ p.error(p.pos, "missing condition in if statement")
+ cond = &BadExpr{From: p.pos, To: p.pos}
+ return
+ }
+
+ outer := p.exprLevel
+ p.exprLevel = -1
+ if p.token == token.Semicolon {
+ p.error(p.pos, "missing init in if statement")
+ return
+ }
+ init = p.parseSimpleStmt(false)
+
+ var condStmt Stmt
+ if p.token == token.LBrace {
+ condStmt = init
+ init = nil
+ } else if p.token == token.Semicolon {
+ p.next()
+
+ condStmt = p.parseSimpleStmt(false)
+ } else {
+ p.error(p.pos, "missing condition in if statement")
+ }
+
+ if condStmt != nil {
+ cond = p.makeExpr(condStmt, "boolean expression")
+ }
+ if cond == nil {
+ cond = &BadExpr{From: p.pos, To: p.pos}
+ }
+ p.exprLevel = outer
+ return
+}
+
+func (p *Parser) makeExpr(s Stmt, want string) Expr {
+ if s == nil {
+ return nil
+ }
+
+ if es, isExpr := s.(*ExprStmt); isExpr {
+ return es.Expr
+ }
+
+ found := "simple statement"
+ if _, isAss := s.(*AssignStmt); isAss {
+ found = "assignment"
+ }
+ p.error(s.Pos(), fmt.Sprintf("expected %s, found %s", want, found))
+ return &BadExpr{From: s.Pos(), To: p.safePos(s.End())}
+}
+
+func (p *Parser) parseReturnStmt() Stmt {
+ if p.trace {
+ defer untracep(tracep(p, "ReturnStmt"))
+ }
+
+ pos := p.pos
+ p.expect(token.Return)
+
+ var x Expr
+ if p.token != token.Semicolon && p.token != token.RBrace {
+ x = p.parseExpr()
+ }
+ p.expectSemi()
+ return &ReturnStmt{
+ ReturnPos: pos,
+ Result: x,
+ }
+}
+
+func (p *Parser) parseExportStmt() Stmt {
+ if p.trace {
+ defer untracep(tracep(p, "ExportStmt"))
+ }
+
+ pos := p.pos
+ p.expect(token.Export)
+ x := p.parseExpr()
+ p.expectSemi()
+ return &ExportStmt{
+ ExportPos: pos,
+ Result: x,
+ }
+}
+
+func (p *Parser) parseSimpleStmt(forIn bool) Stmt {
+ if p.trace {
+ defer untracep(tracep(p, "SimpleStmt"))
+ }
+
+ x := p.parseExprList()
+
+ switch p.token {
+ case token.Assign, token.Define: // assignment statement
+ pos, tok := p.pos, p.token
+ p.next()
+ y := p.parseExprList()
+ return &AssignStmt{
+ LHS: x,
+ RHS: y,
+ Token: tok,
+ TokenPos: pos,
+ }
+ case token.In:
+ if forIn {
+ p.next()
+ y := p.parseExpr()
+
+ var key, value *Ident
+ var ok bool
+ switch len(x) {
+ case 1:
+ key = &Ident{Name: "_", NamePos: x[0].Pos()}
+
+ value, ok = x[0].(*Ident)
+ if !ok {
+ p.errorExpected(x[0].Pos(), "identifier")
+ value = &Ident{Name: "_", NamePos: x[0].Pos()}
+ }
+ case 2:
+ key, ok = x[0].(*Ident)
+ if !ok {
+ p.errorExpected(x[0].Pos(), "identifier")
+ key = &Ident{Name: "_", NamePos: x[0].Pos()}
+ }
+ value, ok = x[1].(*Ident)
+ if !ok {
+ p.errorExpected(x[1].Pos(), "identifier")
+ value = &Ident{Name: "_", NamePos: x[1].Pos()}
+ }
+ }
+ return &ForInStmt{
+ Key: key,
+ Value: value,
+ Iterable: y,
+ }
+ }
+ }
+
+ if len(x) > 1 {
+ p.errorExpected(x[0].Pos(), "1 expression")
+ // continue with first expression
+ }
+
+ switch p.token {
+ case token.Define,
+ token.AddAssign, token.SubAssign, token.MulAssign, token.QuoAssign,
+ token.RemAssign, token.AndAssign, token.OrAssign, token.XorAssign,
+ token.ShlAssign, token.ShrAssign, token.AndNotAssign:
+ pos, tok := p.pos, p.token
+ p.next()
+ y := p.parseExpr()
+ return &AssignStmt{
+ LHS: []Expr{x[0]},
+ RHS: []Expr{y},
+ Token: tok,
+ TokenPos: pos,
+ }
+ case token.Inc, token.Dec:
+ // increment or decrement statement
+ s := &IncDecStmt{Expr: x[0], Token: p.token, TokenPos: p.pos}
+ p.next()
+ return s
+ }
+ return &ExprStmt{Expr: x[0]}
+}
+
+func (p *Parser) parseExprList() (list []Expr) {
+ if p.trace {
+ defer untracep(tracep(p, "ExpressionList"))
+ }
+
+ list = append(list, p.parseExpr())
+ for p.token == token.Comma {
+ p.next()
+ list = append(list, p.parseExpr())
+ }
+ return
+}
+
+func (p *Parser) parseMapElementLit() *MapElementLit {
+ if p.trace {
+ defer untracep(tracep(p, "MapElementLit"))
+ }
+
+ pos := p.pos
+ name := "_"
+ if p.token == token.Ident {
+ name = p.tokenLit
+ } else if p.token == token.String {
+ v, _ := strconv.Unquote(p.tokenLit)
+ name = v
+ } else {
+ p.errorExpected(pos, "map key")
+ }
+ p.next()
+ colonPos := p.expect(token.Colon)
+ valueExpr := p.parseExpr()
+ return &MapElementLit{
+ Key: name,
+ KeyPos: pos,
+ ColonPos: colonPos,
+ Value: valueExpr,
+ }
+}
+
+func (p *Parser) parseMapLit() *MapLit {
+ if p.trace {
+ defer untracep(tracep(p, "MapLit"))
+ }
+
+ lbrace := p.expect(token.LBrace)
+ p.exprLevel++
+
+ var elements []*MapElementLit
+ for p.token != token.RBrace && p.token != token.EOF {
+ elements = append(elements, p.parseMapElementLit())
+
+ if !p.expectComma(token.RBrace, "map element") {
+ break
+ }
+ }
+
+ p.exprLevel--
+ rbrace := p.expect(token.RBrace)
+ return &MapLit{
+ LBrace: lbrace,
+ RBrace: rbrace,
+ Elements: elements,
+ }
+}
+
+func (p *Parser) expect(token token.Token) Pos {
+ pos := p.pos
+
+ if p.token != token {
+ p.errorExpected(pos, "'"+token.String()+"'")
+ }
+ p.next()
+ return pos
+}
+
+func (p *Parser) expectSemi() {
+ switch p.token {
+ case token.RParen, token.RBrace:
+ // semicolon is optional before a closing ')' or '}'
+ case token.Comma:
+ // permit a ',' instead of a ';' but complain
+ p.errorExpected(p.pos, "';'")
+ fallthrough
+ case token.Semicolon:
+ p.next()
+ default:
+ p.errorExpected(p.pos, "';'")
+ p.advance(stmtStart)
+ }
+}
+
+func (p *Parser) advance(to map[token.Token]bool) {
+ for ; p.token != token.EOF; p.next() {
+ if to[p.token] {
+ if p.pos == p.syncPos && p.syncCount < 10 {
+ p.syncCount++
+ return
+ }
+ if p.pos > p.syncPos {
+ p.syncPos = p.pos
+ p.syncCount = 0
+ return
+ }
+ }
+ }
+}
+
+func (p *Parser) error(pos Pos, msg string) {
+ filePos := p.file.Position(pos)
+
+ n := len(p.errors)
+ if n > 0 && p.errors[n-1].Pos.Line == filePos.Line {
+ // discard errors reported on the same line
+ return
+ }
+ if n > 10 {
+ // too many errors; terminate early
+ panic(bailout{})
+ }
+ p.errors.Add(filePos, msg)
+}
+
+func (p *Parser) errorExpected(pos Pos, msg string) {
+ msg = "expected " + msg
+ if pos == p.pos {
+ // error happened at the current position: provide a more specific message
+ switch {
+ case p.token == token.Semicolon && p.tokenLit == "\n":
+ msg += ", found newline"
+ case p.token.IsLiteral():
+ msg += ", found " + p.tokenLit
+ default:
+ msg += ", found '" + p.token.String() + "'"
+ }
+ }
+ p.error(pos, msg)
+}
+
+func (p *Parser) next() {
+ if p.trace && p.pos.IsValid() {
+ s := p.token.String()
+ switch {
+ case p.token.IsLiteral():
+ p.printTrace(s, p.tokenLit)
+ case p.token.IsOperator(), p.token.IsKeyword():
+ p.printTrace(`"` + s + `"`)
+ default:
+ p.printTrace(s)
+ }
+ }
+ p.token, p.tokenLit, p.pos = p.scanner.Scan()
+}
+
+func (p *Parser) printTrace(a ...interface{}) {
+ const (
+ dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ n = len(dots)
+ )
+
+ filePos := p.file.Position(p.pos)
+ _, _ = fmt.Fprintf(p.traceOut, "%5d: %5d:%3d: ", p.pos, filePos.Line,
+ filePos.Column)
+ i := 2 * p.indent
+ for i > n {
+ _, _ = fmt.Fprint(p.traceOut, dots)
+ i -= n
+ }
+ _, _ = fmt.Fprint(p.traceOut, dots[0:i])
+ _, _ = fmt.Fprintln(p.traceOut, a...)
+}
+
+func (p *Parser) safePos(pos Pos) Pos {
+ fileBase := p.file.Base
+ fileSize := p.file.Size
+
+ if int(pos) < fileBase || int(pos) > fileBase+fileSize {
+ return Pos(fileBase + fileSize)
+ }
+ return pos
+}
+
+func tracep(p *Parser, msg string) *Parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+func untracep(p *Parser) {
+ p.indent--
+ p.printTrace(")")
+}
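
End to end, the parser is used by wrapping the source in a SourceFile and calling ParseFile. The sketch below is hypothetical and assumes the SourceFileSet.AddFile(filename, base, size) helper from source_file.go (only partially shown in this diff), where a base of -1 means "append after the current base":

package main

import (
    "fmt"

    "github.com/d5/tengo/v2/parser"
)

func main() {
    src := []byte(`x := 1 + 2; out := x * 3`)

    fileSet := parser.NewFileSet()
    srcFile := fileSet.AddFile("example", -1, len(src)) // assumed helper, see source_file.go

    p := parser.NewParser(srcFile, src, nil) // nil trace writer disables tracing
    file, err := p.ParseFile()
    if err != nil {
        fmt.Println("parse error:", err)
        return
    }
    // File.String re-renders the parsed statements, joined with "; ".
    fmt.Println(file.String())
}
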
diff --git a/vendor/github.com/d5/tengo/v2/parser/pos.go b/vendor/github.com/d5/tengo/v2/parser/pos.go
new file mode 100644
index 00000000..f8d3898c
--- /dev/null
+++ b/vendor/github.com/d5/tengo/v2/parser/pos.go
@@ -0,0 +1,12 @@
+package parser
+
+// Pos represents a position in the file set.
+type Pos int
+
+// NoPos represents an invalid position.
+const NoPos Pos = 0
+
+// IsValid returns true if the position is valid.
+func (p Pos) IsValid() bool {
+ return p != NoPos
+}
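
pos.go encodes "no position" as the zero value, which is exactly what the Pos/End implementations earlier in this diff rely on when an optional delimiter is absent. A trivial sketch (not part of the diff):

package main

import (
    "fmt"

    "github.com/d5/tengo/v2/parser"
)

func main() {
    var unset parser.Pos                  // zero value is parser.NoPos
    fmt.Println(unset.IsValid())          // false
    fmt.Println(parser.Pos(42).IsValid()) // true
}
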
diff --git a/vendor/github.com/d5/tengo/v2/parser/scanner.go b/vendor/github.com/d5/tengo/v2/parser/scanner.go
new file mode 100644
index 00000000..f1d820a4
--- /dev/null
+++ b/vendor/github.com/d5/tengo/v2/parser/scanner.go
@@ -0,0 +1,689 @@
+package parser
+
+import (
+ "fmt"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/d5/tengo/v2/token"
+)
+
+// byte order mark
+const bom = 0xFEFF
+
+// ScanMode represents a scanner mode.
+type ScanMode int
+
+// List of scanner modes.
+const (
+ ScanComments ScanMode = 1 << iota
+ DontInsertSemis
+)
+
+// ScannerErrorHandler is an error handler for the scanner.
+type ScannerErrorHandler func(pos SourceFilePos, msg string)
+
+// Scanner reads the Tengo source text. It's based on Go's scanner
+// implementation.
+type Scanner struct {
+ file *SourceFile // source file handle
+ src []byte // source
+ ch rune // current character
+ offset int // character offset
+ readOffset int // reading offset (position after current character)
+ lineOffset int // current line offset
+ insertSemi bool // insert a semicolon before next newline
+ errorHandler ScannerErrorHandler // error reporting; or nil
+ errorCount int // number of errors encountered
+ mode ScanMode
+}
+
+// NewScanner creates a Scanner.
+func NewScanner(
+ file *SourceFile,
+ src []byte,
+ errorHandler ScannerErrorHandler,
+ mode ScanMode,
+) *Scanner {
+ if file.Size != len(src) {
+ panic(fmt.Sprintf("file size (%d) does not match src len (%d)",
+ file.Size, len(src)))
+ }
+
+ s := &Scanner{
+ file: file,
+ src: src,
+ errorHandler: errorHandler,
+ ch: ' ',
+ mode: mode,
+ }
+
+ s.next()
+ if s.ch == bom {
+ s.next() // ignore BOM at file beginning
+ }
+
+ return s
+}
+
+// ErrorCount returns the number of errors.
+func (s *Scanner) ErrorCount() int {
+ return s.errorCount
+}
+
+// Scan returns a token, token literal and its position.
+func (s *Scanner) Scan() (
+ tok token.Token,
+ literal string,
+ pos Pos,
+) {
+ s.skipWhitespace()
+
+ pos = s.file.FileSetPos(s.offset)
+
+ insertSemi := false
+
+ // determine token value
+ switch ch := s.ch; {
+ case isLetter(ch):
+ literal = s.scanIdentifier()
+ tok = token.Lookup(literal)
+ switch tok {
+ case token.Ident, token.Break, token.Continue, token.Return,
+ token.Export, token.True, token.False, token.Undefined:
+ insertSemi = true
+ }
+ case '0' <= ch && ch <= '9':
+ insertSemi = true
+ tok, literal = s.scanNumber(false)
+ default:
+ s.next() // always make progress
+
+ switch ch {
+ case -1: // EOF
+ if s.insertSemi {
+ s.insertSemi = false // EOF consumed
+ return token.Semicolon, "\n", pos
+ }
+ tok = token.EOF
+ case '\n':
+ // we only reach here if s.insertSemi was set in the first place
+ s.insertSemi = false // newline consumed
+ return token.Semicolon, "\n", pos
+ case '"':
+ insertSemi = true
+ tok = token.String
+ literal = s.scanString()
+ case '\'':
+ insertSemi = true
+ tok = token.Char
+ literal = s.scanRune()
+ case '`':
+ insertSemi = true
+ tok = token.String
+ literal = s.scanRawString()
+ case ':':
+ tok = s.switch2(token.Colon, token.Define)
+ case '.':
+ if '0' <= s.ch && s.ch <= '9' {
+ insertSemi = true
+ tok, literal = s.scanNumber(true)
+ } else {
+ tok = token.Period
+ if s.ch == '.' && s.peek() == '.' {
+ s.next()
+ s.next() // consume last '.'
+ tok = token.Ellipsis
+ }
+ }
+ case ',':
+ tok = token.Comma
+ case '?':
+ tok = token.Question
+ case ';':
+ tok = token.Semicolon
+ literal = ";"
+ case '(':
+ tok = token.LParen
+ case ')':
+ insertSemi = true
+ tok = token.RParen
+ case '[':
+ tok = token.LBrack
+ case ']':
+ insertSemi = true
+ tok = token.RBrack
+ case '{':
+ tok = token.LBrace
+ case '}':
+ insertSemi = true
+ tok = token.RBrace
+ case '+':
+ tok = s.switch3(token.Add, token.AddAssign, '+', token.Inc)
+ if tok == token.Inc {
+ insertSemi = true
+ }
+ case '-':
+ tok = s.switch3(token.Sub, token.SubAssign, '-', token.Dec)
+ if tok == token.Dec {
+ insertSemi = true
+ }
+ case '*':
+ tok = s.switch2(token.Mul, token.MulAssign)
+ case '/':
+ if s.ch == '/' || s.ch == '*' {
+ // comment
+ if s.insertSemi && s.findLineEnd() {
+ // reset position to the beginning of the comment
+ s.ch = '/'
+ s.offset = s.file.Offset(pos)
+ s.readOffset = s.offset + 1
+ s.insertSemi = false // newline consumed
+ return token.Semicolon, "\n", pos
+ }
+ comment := s.scanComment()
+ if s.mode&ScanComments == 0 {
+ // skip comment
+ s.insertSemi = false // newline consumed
+ return s.Scan()
+ }
+ tok = token.Comment
+ literal = comment
+ } else {
+ tok = s.switch2(token.Quo, token.QuoAssign)
+ }
+ case '%':
+ tok = s.switch2(token.Rem, token.RemAssign)
+ case '^':
+ tok = s.switch2(token.Xor, token.XorAssign)
+ case '<':
+ tok = s.switch4(token.Less, token.LessEq, '<',
+ token.Shl, token.ShlAssign)
+ case '>':
+ tok = s.switch4(token.Greater, token.GreaterEq, '>',
+ token.Shr, token.ShrAssign)
+ case '=':
+ tok = s.switch2(token.Assign, token.Equal)
+ case '!':
+ tok = s.switch2(token.Not, token.NotEqual)
+ case '&':
+ if s.ch == '^' {
+ s.next()
+ tok = s.switch2(token.AndNot, token.AndNotAssign)
+ } else {
+ tok = s.switch3(token.And, token.AndAssign, '&', token.LAnd)
+ }
+ case '|':
+ tok = s.switch3(token.Or, token.OrAssign, '|', token.LOr)
+ default:
+ // next reports unexpected BOMs - don't repeat
+ if ch != bom {
+ s.error(s.file.Offset(pos),
+ fmt.Sprintf("illegal character %#U", ch))
+ }
+ insertSemi = s.insertSemi // preserve insertSemi info
+ tok = token.Illegal
+ literal = string(ch)
+ }
+ }
+ if s.mode&DontInsertSemis == 0 {
+ s.insertSemi = insertSemi
+ }
+ return
+}
+
+func (s *Scanner) next() {
+ if s.readOffset < len(s.src) {
+ s.offset = s.readOffset
+ if s.ch == '\n' {
+ s.lineOffset = s.offset
+ s.file.AddLine(s.offset)
+ }
+ r, w := rune(s.src[s.readOffset]), 1
+ switch {
+ case r == 0:
+ s.error(s.offset, "illegal character NUL")
+ case r >= utf8.RuneSelf:
+ // not ASCII
+ r, w = utf8.DecodeRune(s.src[s.readOffset:])
+ if r == utf8.RuneError && w == 1 {
+ s.error(s.offset, "illegal UTF-8 encoding")
+ } else if r == bom && s.offset > 0 {
+ s.error(s.offset, "illegal byte order mark")
+ }
+ }
+ s.readOffset += w
+ s.ch = r
+ } else {
+ s.offset = len(s.src)
+ if s.ch == '\n' {
+ s.lineOffset = s.offset
+ s.file.AddLine(s.offset)
+ }
+ s.ch = -1 // eof
+ }
+}
+
+func (s *Scanner) peek() byte {
+ if s.readOffset < len(s.src) {
+ return s.src[s.readOffset]
+ }
+ return 0
+}
+
+func (s *Scanner) error(offset int, msg string) {
+ if s.errorHandler != nil {
+ s.errorHandler(s.file.Position(s.file.FileSetPos(offset)), msg)
+ }
+ s.errorCount++
+}
+
+func (s *Scanner) scanComment() string {
+ // initial '/' already consumed; s.ch == '/' || s.ch == '*'
+ offs := s.offset - 1 // position of initial '/'
+ var numCR int
+
+ if s.ch == '/' {
+ //-style comment
+ // (the final '\n' is not considered part of the comment)
+ s.next()
+ for s.ch != '\n' && s.ch >= 0 {
+ if s.ch == '\r' {
+ numCR++
+ }
+ s.next()
+ }
+ goto exit
+ }
+
+ /*-style comment */
+ s.next()
+ for s.ch >= 0 {
+ ch := s.ch
+ if ch == '\r' {
+ numCR++
+ }
+ s.next()
+ if ch == '*' && s.ch == '/' {
+ s.next()
+ goto exit
+ }
+ }
+
+ s.error(offs, "comment not terminated")
+
+exit:
+ lit := s.src[offs:s.offset]
+
+ // On Windows, a (//-comment) line may end in "\r\n".
+ // Remove the final '\r' before analyzing the text for line directives (matching the compiler).
+ // Remove any other '\r' afterwards (matching the pre-existing behavior of the scanner).
+ if numCR > 0 && len(lit) >= 2 && lit[1] == '/' && lit[len(lit)-1] == '\r' {
+ lit = lit[:len(lit)-1]
+ numCR--
+ }
+ if numCR > 0 {
+ lit = StripCR(lit, lit[1] == '*')
+ }
+ return string(lit)
+}
+
+func (s *Scanner) findLineEnd() bool {
+ // initial '/' already consumed
+
+ defer func(offs int) {
+ // reset scanner state to where it was upon calling findLineEnd
+ s.ch = '/'
+ s.offset = offs
+ s.readOffset = offs + 1
+ s.next() // consume initial '/' again
+ }(s.offset - 1)
+
+ // read ahead until a newline, EOF, or non-comment tok is found
+ for s.ch == '/' || s.ch == '*' {
+ if s.ch == '/' {
+ //-style comment always contains a newline
+ return true
+ }
+ /*-style comment: look for newline */
+ s.next()
+ for s.ch >= 0 {
+ ch := s.ch
+ if ch == '\n' {
+ return true
+ }
+ s.next()
+ if ch == '*' && s.ch == '/' {
+ s.next()
+ break
+ }
+ }
+ s.skipWhitespace() // s.insertSemi is set
+ if s.ch < 0 || s.ch == '\n' {
+ return true
+ }
+ if s.ch != '/' {
+ // non-comment tok
+ return false
+ }
+ s.next() // consume '/'
+ }
+ return false
+}
+
+func (s *Scanner) scanIdentifier() string {
+ offs := s.offset
+ for isLetter(s.ch) || isDigit(s.ch) {
+ s.next()
+ }
+ return string(s.src[offs:s.offset])
+}
+
+func (s *Scanner) scanMantissa(base int) {
+ for digitVal(s.ch) < base {
+ s.next()
+ }
+}
+
+func (s *Scanner) scanNumber(
+ seenDecimalPoint bool,
+) (tok token.Token, lit string) {
+ // digitVal(s.ch) < 10
+ offs := s.offset
+ tok = token.Int
+
+ defer func() {
+ lit = string(s.src[offs:s.offset])
+ }()
+
+ if seenDecimalPoint {
+ offs--
+ tok = token.Float
+ s.scanMantissa(10)
+ goto exponent
+ }
+
+ if s.ch == '0' {
+ // int or float
+ offs := s.offset
+ s.next()
+ if s.ch == 'x' || s.ch == 'X' {
+ // hexadecimal int
+ s.next()
+ s.scanMantissa(16)
+ if s.offset-offs <= 2 {
+ // only scanned "0x" or "0X"
+ s.error(offs, "illegal hexadecimal number")
+ }
+ } else {
+ // octal int or float
+ seenDecimalDigit := false
+ s.scanMantissa(8)
+ if s.ch == '8' || s.ch == '9' {
+ // illegal octal int or float
+ seenDecimalDigit = true
+ s.scanMantissa(10)
+ }
+ if s.ch == '.' || s.ch == 'e' || s.ch == 'E' || s.ch == 'i' {
+ goto fraction
+ }
+ // octal int
+ if seenDecimalDigit {
+ s.error(offs, "illegal octal number")
+ }
+ }
+ return
+ }
+
+ // decimal int or float
+ s.scanMantissa(10)
+
+fraction:
+ if s.ch == '.' {
+ tok = token.Float
+ s.next()
+ s.scanMantissa(10)
+ }
+
+exponent:
+ if s.ch == 'e' || s.ch == 'E' {
+ tok = token.Float
+ s.next()
+ if s.ch == '-' || s.ch == '+' {
+ s.next()
+ }
+ if digitVal(s.ch) < 10 {
+ s.scanMantissa(10)
+ } else {
+ s.error(offs, "illegal floating-point exponent")
+ }
+ }
+ return
+}
+
+func (s *Scanner) scanEscape(quote rune) bool {
+ offs := s.offset
+
+ var n int
+ var base, max uint32
+ switch s.ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
+ s.next()
+ return true
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ n, base, max = 3, 8, 255
+ case 'x':
+ s.next()
+ n, base, max = 2, 16, 255
+ case 'u':
+ s.next()
+ n, base, max = 4, 16, unicode.MaxRune
+ case 'U':
+ s.next()
+ n, base, max = 8, 16, unicode.MaxRune
+ default:
+ msg := "unknown escape sequence"
+ if s.ch < 0 {
+ msg = "escape sequence not terminated"
+ }
+ s.error(offs, msg)
+ return false
+ }
+
+ var x uint32
+ for n > 0 {
+ d := uint32(digitVal(s.ch))
+ if d >= base {
+ msg := fmt.Sprintf(
+ "illegal character %#U in escape sequence", s.ch)
+ if s.ch < 0 {
+ msg = "escape sequence not terminated"
+ }
+ s.error(s.offset, msg)
+ return false
+ }
+ x = x*base + d
+ s.next()
+ n--
+ }
+
+ if x > max || 0xD800 <= x && x < 0xE000 {
+ s.error(offs, "escape sequence is invalid Unicode code point")
+ return false
+ }
+ return true
+}
+
+func (s *Scanner) scanRune() string {
+ offs := s.offset - 1 // '\'' opening already consumed
+
+ valid := true
+ n := 0
+ for {
+ ch := s.ch
+ if ch == '\n' || ch < 0 {
+ // only report error if we don't have one already
+ if valid {
+ s.error(offs, "rune literal not terminated")
+ valid = false
+ }
+ break
+ }
+ s.next()
+ if ch == '\'' {
+ break
+ }
+ n++
+ if ch == '\\' {
+ if !s.scanEscape('\'') {
+ valid = false
+ }
+ // continue to read to closing quote
+ }
+ }
+
+ if valid && n != 1 {
+ s.error(offs, "illegal rune literal")
+ }
+ return string(s.src[offs:s.offset])
+}
+
+func (s *Scanner) scanString() string {
+ offs := s.offset - 1 // '"' opening already consumed
+
+ for {
+ ch := s.ch
+ if ch == '\n' || ch < 0 {
+ s.error(offs, "string literal not terminated")
+ break
+ }
+ s.next()
+ if ch == '"' {
+ break
+ }
+ if ch == '\\' {
+ s.scanEscape('"')
+ }
+ }
+ return string(s.src[offs:s.offset])
+}
+
+func (s *Scanner) scanRawString() string {
+ offs := s.offset - 1 // '`' opening already consumed
+
+ hasCR := false
+ for {
+ ch := s.ch
+ if ch < 0 {
+ s.error(offs, "raw string literal not terminated")
+ break
+ }
+
+ s.next()
+
+ if ch == '`' {
+ break
+ }
+
+ if ch == '\r' {
+ hasCR = true
+ }
+ }
+
+ lit := s.src[offs:s.offset]
+ if hasCR {
+ lit = StripCR(lit, false)
+ }
+ return string(lit)
+}
+
+// StripCR removes carriage return characters.
+func StripCR(b []byte, comment bool) []byte {
+ c := make([]byte, len(b))
+ i := 0
+ for j, ch := range b {
+ // In a /*-style comment, don't strip \r from *\r/ (incl. sequences of
+ // \r from *\r\r...\r/) since the resulting */ would terminate the
+ // comment too early unless the \r is immediately following the opening
+ // /* in which case it's ok because /*/ is not closed yet.
+ if ch != '\r' || comment && i > len("/*") && c[i-1] == '*' &&
+ j+1 < len(b) && b[j+1] == '/' {
+ c[i] = ch
+ i++
+ }
+ }
+ return c[:i]
+}
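
StripCR is exported, so it can be exercised directly; a minimal sketch of both modes (everything used here is visible in the function above):

package main

import (
	"fmt"

	"github.com/d5/tengo/v2/parser"
)

func main() {
	// Raw string literal: every '\r' is dropped.
	raw := []byte("line1\r\nline2\r\n")
	fmt.Printf("%q\n", parser.StripCR(raw, false)) // "line1\nline2\n"

	// /*-style comment: the '\r' in "*\r/" is kept, because removing it
	// would create a premature "*/" terminator.
	comment := []byte("/* a *\r/ b */")
	fmt.Printf("%q\n", parser.StripCR(comment, true)) // "/* a *\r/ b */"
}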
+
+func (s *Scanner) skipWhitespace() {
+ for s.ch == ' ' || s.ch == '\t' || s.ch == '\n' && !s.insertSemi ||
+ s.ch == '\r' {
+ s.next()
+ }
+}
+
+func (s *Scanner) switch2(tok0, tok1 token.Token) token.Token {
+ if s.ch == '=' {
+ s.next()
+ return tok1
+ }
+ return tok0
+}
+
+func (s *Scanner) switch3(
+ tok0, tok1 token.Token,
+ ch2 rune,
+ tok2 token.Token,
+) token.Token {
+ if s.ch == '=' {
+ s.next()
+ return tok1
+ }
+ if s.ch == ch2 {
+ s.next()
+ return tok2
+ }
+ return tok0
+}
+
+func (s *Scanner) switch4(
+ tok0, tok1 token.Token,
+ ch2 rune,
+ tok2, tok3 token.Token,
+) token.Token {
+ if s.ch == '=' {
+ s.next()
+ return tok1
+ }
+ if s.ch == ch2 {
+ s.next()
+ if s.ch == '=' {
+ s.next()
+ return tok3
+ }
+ return tok2
+ }
+ return tok0
+}
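
switch2, switch3 and switch4 implement the one- and two-character lookahead used to fold operators that share a prefix (for example '<', '<=', '<<' and '<<='). Below is a self-contained sketch of the same idea; the token names are illustrative placeholders, not constants from the tengo token package:

package main

import "fmt"

// scanLess mimics the switch4 pattern for a '<' that has already been read:
// the next one or two characters select between four possible operators.
func scanLess(rest string) string {
	switch {
	case len(rest) > 0 && rest[0] == '=':
		return "LessOrEqual"
	case len(rest) > 0 && rest[0] == '<':
		if len(rest) > 1 && rest[1] == '=' {
			return "ShiftLeftAssign"
		}
		return "ShiftLeft"
	default:
		return "Less"
	}
}

func main() {
	for _, src := range []string{"<", "<=", "<<", "<<="} {
		fmt.Printf("%-3s -> %s\n", src, scanLess(src[1:]))
	}
}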
+
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' ||
+ ch >= utf8.RuneSelf && unicode.IsLetter(ch)
+}
+
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9' ||
+ ch >= utf8.RuneSelf && unicode.IsDigit(ch)
+}
+
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
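
isLetter, isDigit and digitVal are unexported, so the following is a hypothetical in-package test sketch rather than external usage; it only restates behavior visible in the three functions above.

package parser

import "testing"

// TestCharClassifiers (illustrative): identifiers may start with '_' or any
// Unicode letter, digits include Unicode decimal digits, and digitVal
// returns 16 as the "not a digit" sentinel.
func TestCharClassifiers(t *testing.T) {
	if !isLetter('_') || !isLetter('é') || isLetter('9') {
		t.Error("unexpected isLetter result")
	}
	if !isDigit('7') || isDigit('x') {
		t.Error("unexpected isDigit result")
	}
	if digitVal('f') != 15 || digitVal('F') != 15 || digitVal('g') != 16 {
		t.Error("unexpected digitVal result")
	}
}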
diff --git a/vendor/github.com/d5/tengo/v2/parser/source_file.go b/vendor/github.com/d5/tengo/v2/parser/source_file.go
new file mode 100644
index 00000000..e9f4b0f5
--- /dev/null
+++ b/vendor/github.com/d5/tengo/v2/parser/source_file.go
@@ -0,0 +1,231 @@
+package parser
+
+import (
+ "fmt"
+ "sort"
+)
+
+// SourceFilePos represents position information in a source file.
+type SourceFilePos struct {
+ Filename string // filename, if any
+ Offset int // offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (byte count)
+}
+
+// IsValid returns true if the position is valid.
+func (p SourceFilePos) IsValid() bool {
+ return p.Line > 0
+}
+
+// String returns a string in one of several forms:
+//
+// file:line:column valid position with file name
+// file:line valid position with file name but no column (column == 0)
+// line:column valid position without file name
+// line valid position without file name and no column (column == 0)
+// file invalid position with file name
+// - invalid position without file name
+//
+func (p SourceFilePos) String() string {
+ s := p.Filename
+ if p.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d", p.Line)
+ if p.Column != 0 {
+ s += fmt.Sprintf(":%d", p.Column)
+ }
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
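
A small sketch that exercises each documented form (the file name and numbers are arbitrary):

package main

import (
	"fmt"

	"github.com/d5/tengo/v2/parser"
)

func main() {
	fmt.Println(parser.SourceFilePos{Filename: "main.tengo", Line: 3, Column: 7}) // main.tengo:3:7
	fmt.Println(parser.SourceFilePos{Filename: "main.tengo", Line: 3})            // main.tengo:3
	fmt.Println(parser.SourceFilePos{Line: 3, Column: 7})                         // 3:7
	fmt.Println(parser.SourceFilePos{Filename: "main.tengo"})                     // main.tengo
	fmt.Println(parser.SourceFilePos{})                                           // -
}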
+
+// SourceFileSet represents a set of source files.
+type SourceFileSet struct {
+ Base int // base offset for the next file
+ Files []*SourceFile // list of files in the order added to the set
+ LastFile *SourceFile // cache of last file looked up
+}
+
+// NewFileSet creates a new file set.
+func NewFileSet() *SourceFileSet {
+ return &SourceFileSet{
+ Base: 1, // 0 == NoPos
+ }
+}
+
+// AddFile adds a new file in the file set.
+func (s *SourceFileSet) AddFile(filename string, base, size int) *SourceFile {
+ if base < 0 {
+ base = s.Base
+ }
+ if base < s.Base || size < 0 {
+ panic("illegal base or size")
+ }
+ f := &SourceFile{
+ set: s,
+ Name: filename,
+ Base: base,
+ Size: size,
+ Lines: []int{0},
+ }
+ base += size + 1 // +1 because EOF also has a position
+ if base < 0 {
+ panic("offset overflow (> 2G of source code in file set)")
+ }
+
+ // add the file to the file set
+ s.Base = base
+ s.Files = append(s.Files, f)
+ s.LastFile = f
+ return f
+}
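
A sketch of how the base offset advances as files are added: each file claims the Pos range [Base, Base+Size] (the extra slot is for EOF), and passing a negative base lets the set choose the next free one.

package main

import (
	"fmt"

	"github.com/d5/tengo/v2/parser"
)

func main() {
	set := parser.NewFileSet()
	a := set.AddFile("a.tengo", -1, 10) // claims Pos 1..11 (11 is EOF)
	b := set.AddFile("b.tengo", -1, 5)  // claims Pos 12..17
	fmt.Println(a.Base, b.Base, set.Base) // 1 12 18
}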
+
+// File returns the file that contains the position p. If no such file is
+// found (for instance for p == NoPos), the result is nil.
+func (s *SourceFileSet) File(p Pos) (f *SourceFile) {
+ if p != NoPos {
+ f = s.file(p)
+ }
+ return
+}
+
+// Position converts a Pos p in the file set into a SourceFilePos value.
+func (s *SourceFileSet) Position(p Pos) (pos SourceFilePos) {
+ if p != NoPos {
+ if f := s.file(p); f != nil {
+ return f.position(p)
+ }
+ }
+ return
+}
+
+func (s *SourceFileSet) file(p Pos) *SourceFile {
+ // common case: p is in last file
+ f := s.LastFile
+ if f != nil && f.Base <= int(p) && int(p) <= f.Base+f.Size {
+ return f
+ }
+
+ // p is not in last file - search all files
+ if i := searchFiles(s.Files, int(p)); i >= 0 {
+ f := s.Files[i]
+
+ // f.base <= int(p) by definition of searchFiles
+ if int(p) <= f.Base+f.Size {
+ s.LastFile = f // race is ok - s.last is only a cache
+ return f
+ }
+ }
+ return nil
+}
+
+func searchFiles(a []*SourceFile, x int) int {
+ return sort.Search(len(a), func(i int) bool { return a[i].Base > x }) - 1
+}
+
+// SourceFile represents a source file.
+type SourceFile struct {
+ // The SourceFileSet to which the file belongs
+ set *SourceFileSet
+ // File name as provided to AddFile
+ Name string
+ // Pos value range for this file is [Base...Base+Size]
+ Base int
+ // File size as provided to AddFile
+ Size int
+ // Lines contains the offset of the first character for each line
+ // (the first entry is always 0)
+ Lines []int
+}
+
+// Set returns the SourceFileSet to which the file belongs.
+func (f *SourceFile) Set() *SourceFileSet {
+ return f.set
+}
+
+// LineCount returns the current number of lines.
+func (f *SourceFile) LineCount() int {
+ return len(f.Lines)
+}
+
+// AddLine adds the offset of the first character of a new line.
+func (f *SourceFile) AddLine(offset int) {
+ i := len(f.Lines)
+ if (i == 0 || f.Lines[i-1] < offset) && offset < f.Size {
+ f.Lines = append(f.Lines, offset)
+ }
+}
+
+// LineStart returns the position of the first character in the line.
+func (f *SourceFile) LineStart(line int) Pos {
+ if line < 1 {
+ panic("illegal line number (line numbering starts at 1)")
+ }
+ if line > len(f.Lines) {
+ panic("illegal line number")
+ }
+ return Pos(f.Base + f.Lines[line-1])
+}
+
+// FileSetPos translates a file offset into a Pos value in the file set.
+func (f *SourceFile) FileSetPos(offset int) Pos {
+ if offset > f.Size {
+ panic("illegal file offset")
+ }
+ return Pos(f.Base + offset)
+}
+
+// Offset translates the file set position into the file offset.
+func (f *SourceFile) Offset(p Pos) int {
+ if int(p) < f.Base || int(p) > f.Base+f.Size {
+ panic("illegal SourcePos value")
+ }
+ return int(p) - f.Base
+}
+
+// Position translates the file set position into the file position.
+func (f *SourceFile) Position(p Pos) (pos SourceFilePos) {
+ if p != NoPos {
+ if int(p) < f.Base || int(p) > f.Base+f.Size {
+ panic("illegal SourcePos value")
+ }
+ pos = f.position(p)
+ }
+ return
+}
+
+func (f *SourceFile) position(p Pos) (pos SourceFilePos) {
+ offset := int(p) - f.Base
+ pos.Offset = offset
+ pos.Filename, pos.Line, pos.Column = f.unpack(offset)
+ return
+}
+
+func (f *SourceFile) unpack(offset int) (filename string, line, column int) {
+ filename = f.Name
+ if i := searchInts(f.Lines, offset); i >= 0 {
+ line, column = i+1, offset-f.Lines[i]+1
+ }
+ return
+}
+
+func searchInts(a []int, x int) int {
+ // This function body is a manually inlined version of:
+ // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
+ i, j := 0, len(a)
+ for i < j {
+ h := i + (j-i)/2 // avoid overflow when computing h
+ // i ≤ h < j
+ if a[h] <= x {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ return i - 1
+}
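
Putting source_file.go together: AddLine records line-start offsets (normally done by the scanner as it reads newlines), FileSetPos turns a file offset into a set-wide Pos, and Position maps that Pos back to file:line:column. A minimal sketch:

package main

import (
	"fmt"

	"github.com/d5/tengo/v2/parser"
)

func main() {
	src := "a := 1\nb := 2\n"
	set := parser.NewFileSet()
	file := set.AddFile("demo.tengo", -1, len(src))

	// Record the start offset of every line after the first
	// (the scanner normally does this while reading '\n').
	for off, ch := range src {
		if ch == '\n' && off+1 < len(src) {
			file.AddLine(off + 1)
		}
	}

	p := file.FileSetPos(7)      // offset of 'b' in src
	fmt.Println(set.Position(p)) // demo.tengo:2:1
}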
diff --git a/vendor/github.com/d5/tengo/v2/parser/stmt.go b/vendor/github.com/d5/tengo/v2/parser/stmt.go
new file mode 100644
index 00000000..c0848c48
--- /dev/null
+++ b/vendor/github.com/d5/tengo/v2/parser/stmt.go
@@ -0,0 +1,349 @@
+package parser
+
+import (
+ "strings"
+
+ "github.com/d5/tengo/v2/token"
+)
+
+// Stmt represents a statement in the AST.
+type Stmt interface {
+ Node
+ stmtNode()
+}
+
+// AssignStmt represents an assignment statement.
+type AssignStmt struct {
+ LHS []Expr
+ RHS []Expr
+ Token token.Token
+ TokenPos Pos
+}
+
+func (s *AssignStmt) stmtNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (s *AssignStmt) Pos() Pos {
+ return s.LHS[0].Pos()
+}
+
+// End returns the position of first character immediately after the node.
+func (s *AssignStmt) End() Pos {
+ return s.RHS[len(s.RHS)-1].End()
+}
+
+func (s *AssignStmt) String() string {
+ var lhs, rhs []string
+ for _, e := range s.LHS {
+ lhs = append(lhs, e.String())
+ }
+ for _, e := range s.RHS {
+ rhs = append(rhs, e.String())
+ }
+ return strings.Join(lhs, ", ") + " " + s.Token.String() +
+ " " + strings.Join(rhs, ", ")
+}
+
+// BadStmt represents a bad statement.
+type BadStmt struct {
+ From Pos
+ To Pos
+}
+
+func (s *BadStmt) stmtNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (s *BadStmt) Pos() Pos {
+ return s.From
+}
+
+// End returns the position of first character immediately after the node.
+func (s *BadStmt) End() Pos {
+ return s.To
+}
+
+func (s *BadStmt) String() string {
+ return "<bad statement>"
+}
+
+// BlockStmt represents a block statement.
+type BlockStmt struct {
+ Stmts []Stmt
+ LBrace Pos
+ RBrace Pos
+}
+
+func (s *BlockStmt) stmtNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (s *BlockStmt) Pos() Pos {
+ return s.LBrace
+}
+
+// End returns the position of first character immediately after the node.
+func (s *BlockStmt) End() Pos {
+ return s.RBrace + 1
+}
+
+func (s *BlockStmt) String() string {
+ var list []string
+ for _, e := range s.Stmts {
+ list = append(list, e.String())
+ }
+ return "{" + strings.Join(list, "; ") + "}"
+}
+
+// BranchStmt represents a branch statement.
+type BranchStmt struct {
+ Token token.Token
+ TokenPos Pos
+ Label *Ident
+}
+
+func (s *BranchStmt) stmtNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (s *BranchStmt) Pos() Pos {
+ return s.TokenPos
+}
+
+// End returns the position of first character immediately after the node.
+func (s *BranchStmt) End() Pos {
+ if s.Label != nil {
+ return s.Label.End()
+ }
+
+ return Pos(int(s.TokenPos) + len(s.Token.String()))
+}
+
+func (s *BranchStmt) String() string {
+ var label string
+ if s.Label != nil {
+ label = " " + s.Label.Name
+ }
+ return s.Token.String() + label
+}
+
+// EmptyStmt represents an empty statement.
+type EmptyStmt struct {
+ Semicolon Pos
+ Implicit bool
+}
+
+func (s *EmptyStmt) stmtNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (s *EmptyStmt) Pos() Pos {
+ return s.Semicolon
+}
+
+// End returns the position of first character immediately after the node.
+func (s *EmptyStmt) End() Pos {
+ if s.Implicit {
+ return s.Semicolon
+ }
+ return s.Semicolon + 1
+}
+
+func (s *EmptyStmt) String() string {
+ return ";"
+}
+
+// ExportStmt represents an export statement.
+type ExportStmt struct {
+ ExportPos Pos
+ Result Expr
+}
+
+func (s *ExportStmt) stmtNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (s *ExportStmt) Pos() Pos {
+ return s.ExportPos
+}
+
+// End returns the position of first character immediately after the node.
+func (s *ExportStmt) End() Pos {
+ return s.Result.End()
+}
+
+func (s *ExportStmt) String() string {
+ return "export " + s.Result.String()
+}
+
+// ExprStmt represents an expression statement.
+type ExprStmt struct {
+ Expr Expr
+}
+
+func (s *ExprStmt) stmtNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (s *ExprStmt) Pos() Pos {
+ return s.Expr.Pos()
+}
+
+// End returns the position of first character immediately after the node.
+func (s *ExprStmt) End() Pos {
+ return s.Expr.End()
+}
+
+func (s *ExprStmt) String() string {
+ return s.Expr.String()
+}
+
+// ForInStmt represents a for-in statement.
+type ForInStmt struct {
+ ForPos Pos
+ Key *Ident
+ Value *Ident
+ Iterable Expr
+ Body *BlockStmt
+}
+
+func (s *ForInStmt) stmtNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (s *ForInStmt) Pos() Pos {
+ return s.ForPos
+}
+
+// End returns the position of first character immediately after the node.
+func (s *ForInStmt) End() Pos {
+ return s.Body.End()
+}
+
+func (s *ForInStmt) String() string {
+ if s.Value != nil {
+ return "for " + s.Key.String() + ", " + s.Value.String() +
+ " in " + s.Iterable.String() + " " + s.Body.String()
+ }
+ return "for " + s.Key.String() + " in " + s.Iterable.String() +
+ " " + s.Body.String()
+}
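
The String methods render AST nodes as approximate source text. For example, a for-in node prints as shown below; this sketch assumes the Ident node (with its Name field) defined in expr.go elsewhere in this diff:

package main

import (
	"fmt"

	"github.com/d5/tengo/v2/parser"
)

func main() {
	stmt := &parser.ForInStmt{
		Key:      &parser.Ident{Name: "i"},
		Value:    &parser.Ident{Name: "v"},
		Iterable: &parser.Ident{Name: "items"},
		Body:     &parser.BlockStmt{},
	}
	fmt.Println(stmt) // for i, v in items {}
}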
+
+// ForStmt represents a for statement.
+type ForStmt struct {
+ ForPos Pos
+ Init Stmt
+ Cond Expr
+ Post Stmt
+ Body *BlockStmt
+}
+
+func (s *ForStmt) stmtNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (s *ForStmt) Pos() Pos {
+ return s.ForPos
+}
+
+// End returns the position of first character immediately after the node.
+func (s *ForStmt) End() Pos {
+ return s.Body.End()
+}
+
+func (s *ForStmt) String() string {
+ var init, cond, post string
+ if s.Init != nil {
+ init = s.Init.String()
+ }
+ if s.Cond != nil {
+ cond = s.Cond.String() + " "
+ }
+ if s.Post != nil {
+ post = s.Post.String()
+ }
+
+ if init != "" || post != "" {
+ return "for " + init + " ; " + cond + " ; " + post + s.Body.String()
+ }
+ return "for " + cond + s.Body.String()
+}
+
+// IfStmt represents an if statement.
+type IfStmt struct {
+ IfPos Pos
+ Init Stmt
+ Cond Expr
+ Body *BlockStmt
+ Else Stmt // else branch; or nil
+}
+
+func (s *IfStmt) stmtNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (s *IfStmt) Pos() Pos {
+ return s.IfPos
+}
+
+// End returns the position of first character immediately after the node.
+func (s *IfStmt) End() Pos {
+ if s.Else != nil {
+ return s.Else.End()
+ }
+ return s.Body.End()
+}
+
+func (s *IfStmt) String() string {
+ var initStmt, elseStmt string
+ if s.Init != nil {
+ initStmt = s.Init.String() + "; "
+ }
+ if s.Else != nil {
+ elseStmt = " else " + s.Else.String()
+ }
+ return "if " + initStmt + s.Cond.String() + " " +
+ s.Body.String() + elseStmt
+}
+
+// IncDecStmt represents an increment or decrement statement.
+type IncDecStmt struct {
+ Expr Expr
+ Token token.Token
+ TokenPos Pos
+}
+
+func (s *IncDecStmt) stmtNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (s *IncDecStmt) Pos() Pos {
+ return s.Expr.Pos()
+}
+
+// End returns the position of first character immediately after the node.
+func (s *IncDecStmt) End() Pos {
+ return Pos(int(s.TokenPos) + 2)
+}
+
+func (s *IncDecStmt) String() string {
+ return s.Expr.String() + s.Token.String()
+}
+
+// ReturnStmt represents a return statement.
+type ReturnStmt struct {
+ ReturnPos Pos
+ Result Expr
+}
+
+func (s *ReturnStmt) stmtNode() {}
+
+// Pos returns the position of first character belonging to the node.
+func (s *ReturnStmt) Pos() Pos {
+ return s.ReturnPos
+}
+
+// End returns the position of first character immediately after the node.
+func (s *ReturnStmt) End() Pos {
+ if s.Result != nil {
+ return s.Result.End()
+ }
+ return s.ReturnPos + 6
+}
+
+func (s *ReturnStmt) String() string {
+ if s.Result != nil {
+ return "return " + s.Result.String()
+ }
+ return "return"
+}
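
To close, a sketch that composes several of the statement nodes above and prints them; as in the previous example, Ident (with its Name field) is assumed from expr.go, and positions are left at NoPos since String does not use them:

package main

import (
	"fmt"

	"github.com/d5/tengo/v2/parser"
)

func main() {
	stmt := &parser.IfStmt{
		Cond: &parser.Ident{Name: "ok"},
		Body: &parser.BlockStmt{
			Stmts: []parser.Stmt{
				&parser.ReturnStmt{Result: &parser.Ident{Name: "x"}},
			},
		},
		Else: &parser.BlockStmt{
			Stmts: []parser.Stmt{&parser.ReturnStmt{}},
		},
	}
	fmt.Println(stmt) // if ok {return x} else {return}
}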