From 0917dc876613fd71c9726a34bf0138b4f5121be9 Mon Sep 17 00:00:00 2001
From: Benjamin <b.mpickford@outlook.com>
Date: Mon, 18 Nov 2019 06:18:01 +1000
Subject: Update markdown parsing library to github.com/gomarkdown/markdown
 (#944)

---
 .../github.com/gomarkdown/markdown/parser/aside.go |   73 +
 .../gomarkdown/markdown/parser/attribute.go        |  116 ++
 .../github.com/gomarkdown/markdown/parser/block.go | 1978 ++++++++++++++++++++
 .../gomarkdown/markdown/parser/callout.go          |   29 +
 .../gomarkdown/markdown/parser/caption.go          |   70 +
 .../gomarkdown/markdown/parser/citation.go         |   86 +
 .../github.com/gomarkdown/markdown/parser/esc.go   |   20 +
 .../gomarkdown/markdown/parser/figures.go          |  119 ++
 .../gomarkdown/markdown/parser/include.go          |  129 ++
 .../gomarkdown/markdown/parser/inline.go           | 1284 +++++++++++++
 .../gomarkdown/markdown/parser/matter.go           |   36 +
 .../gomarkdown/markdown/parser/options.go          |   32 +
 .../gomarkdown/markdown/parser/parser.go           |  812 ++++++++
 .../github.com/gomarkdown/markdown/parser/ref.go   |   89 +
 14 files changed, 4873 insertions(+)
 create mode 100644 vendor/github.com/gomarkdown/markdown/parser/aside.go
 create mode 100644 vendor/github.com/gomarkdown/markdown/parser/attribute.go
 create mode 100644 vendor/github.com/gomarkdown/markdown/parser/block.go
 create mode 100644 vendor/github.com/gomarkdown/markdown/parser/callout.go
 create mode 100644 vendor/github.com/gomarkdown/markdown/parser/caption.go
 create mode 100644 vendor/github.com/gomarkdown/markdown/parser/citation.go
 create mode 100644 vendor/github.com/gomarkdown/markdown/parser/esc.go
 create mode 100644 vendor/github.com/gomarkdown/markdown/parser/figures.go
 create mode 100644 vendor/github.com/gomarkdown/markdown/parser/include.go
 create mode 100644 vendor/github.com/gomarkdown/markdown/parser/inline.go
 create mode 100644 vendor/github.com/gomarkdown/markdown/parser/matter.go
 create mode 100644 vendor/github.com/gomarkdown/markdown/parser/options.go
 create mode 100644 vendor/github.com/gomarkdown/markdown/parser/parser.go
 create mode 100644 vendor/github.com/gomarkdown/markdown/parser/ref.go

(limited to 'vendor/github.com/gomarkdown/markdown/parser')

diff --git a/vendor/github.com/gomarkdown/markdown/parser/aside.go b/vendor/github.com/gomarkdown/markdown/parser/aside.go
new file mode 100644
index 00000000..96e25fe0
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/parser/aside.go
@@ -0,0 +1,73 @@
+package parser
+
+import (
+	"bytes"
+
+	"github.com/gomarkdown/markdown/ast"
+)
+
+// returns aside prefix length
+func (p *Parser) asidePrefix(data []byte) int {
+	i := 0
+	n := len(data)
+	for i < 3 && i < n && data[i] == ' ' {
+		i++
+	}
+	if i+1 < n && data[i] == 'A' && data[i+1] == '>' {
+		if i+2 < n && data[i+2] == ' ' {
+			return i + 3
+		}
+		return i + 2
+	}
+	return 0
+}
+
+// aside ends with at least one blank line
+// followed by something without an aside prefix
+func (p *Parser) terminateAside(data []byte, beg, end int) bool {
+	if p.isEmpty(data[beg:]) <= 0 {
+		return false
+	}
+	if end >= len(data) {
+		return true
+	}
+	return p.asidePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
+}
+
+// parse an aside fragment
+func (p *Parser) aside(data []byte) int {
+	var raw bytes.Buffer
+	beg, end := 0, 0
+	// identical to quote
+	for beg < len(data) {
+		end = beg
+		// Step over whole lines, collecting them. While doing that, check for
+		// fenced code and if one's found, incorporate it altogether,
+		// regardless of any contents inside it
+		for end < len(data) && data[end] != '\n' {
+			if p.extensions&FencedCode != 0 {
+				if i := p.fencedCodeBlock(data[end:], false); i > 0 {
+					// -1 to compensate for the extra end++ after the loop:
+					end += i - 1
+					break
+				}
+			}
+			end++
+		}
+		end = skipCharN(data, end, '\n', 1)
+		if pre := p.asidePrefix(data[beg:]); pre > 0 {
+			// skip the prefix
+			beg += pre
+		} else if p.terminateAside(data, beg, end) {
+			break
+		}
+		// this line is part of the aside
+		raw.Write(data[beg:end])
+		beg = end
+	}
+
+	block := p.addBlock(&ast.Aside{})
+	p.block(raw.Bytes())
+	p.finalize(block)
+	return end
+}
diff --git a/vendor/github.com/gomarkdown/markdown/parser/attribute.go b/vendor/github.com/gomarkdown/markdown/parser/attribute.go
new file mode 100644
index 00000000..5fdb0709
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/parser/attribute.go
@@ -0,0 +1,116 @@
+package parser
+
+import (
+	"bytes"
+
+	"github.com/gomarkdown/markdown/ast"
+)
+
+// attribute parses a (potential) block attribute and adds it to p.
+func (p *Parser) attribute(data []byte) []byte {
+	if len(data) < 3 {
+		return data
+	}
+	i := 0
+	if data[i] != '{' {
+		return data
+	}
+	i++
+
+	// last character must be a } otherwise it's not an attribute
+	end := skipUntilChar(data, i, '\n')
+	if data[end-1] != '}' {
+		return data
+	}
+
+	i = skipSpace(data, i)
+	b := &ast.Attribute{Attrs: make(map[string][]byte)}
+
+	esc := false
+	quote := false
+	trail := 0
+Loop:
+	for ; i < len(data); i++ {
+		switch data[i] {
+		case ' ', '\t', '\f', '\v':
+			if quote {
+				continue
+			}
+			chunk := data[trail+1 : i]
+			if len(chunk) == 0 {
+				trail = i
+				continue
+			}
+			switch {
+			case chunk[0] == '.':
+				b.Classes = append(b.Classes, chunk[1:])
+			case chunk[0] == '#':
+				b.ID = chunk[1:]
+			default:
+				k, v := keyValue(chunk)
+				if k != nil && v != nil {
+					b.Attrs[string(k)] = v
+				} else {
+					// this is illegal in an attribute
+					return data
+				}
+			}
+			trail = i
+		case '"':
+			if esc {
+				esc = !esc
+				continue
+			}
+			quote = !quote
+		case '\\':
+			esc = !esc
+		case '}':
+			if esc {
+				esc = !esc
+				continue
+			}
+			chunk := data[trail+1 : i]
+			if len(chunk) == 0 {
+				return data
+			}
+			switch {
+			case chunk[0] == '.':
+				b.Classes = append(b.Classes, chunk[1:])
+			case chunk[0] == '#':
+				b.ID = chunk[1:]
+			default:
+				k, v := keyValue(chunk)
+				if k != nil && v != nil {
+					b.Attrs[string(k)] = v
+				} else {
+					return data
+				}
+			}
+			i++
+			break Loop
+		default:
+			esc = false
+		}
+	}
+
+	p.attr = b
+	return data[i:]
+}
+
+// key="value" quotes are mandatory.
+func keyValue(data []byte) ([]byte, []byte) {
+	chunk := bytes.SplitN(data, []byte{'='}, 2)
+	if len(chunk) != 2 {
+		return nil, nil
+	}
+	key := chunk[0]
+	value := chunk[1]
+
+	if len(value) < 3 || len(key) == 0 {
+		return nil, nil
+	}
+	if value[0] != '"' || value[len(value)-1] != '"' {
+		return key, nil
+	}
+	return key, value[1 : len(value)-1]
+}
diff --git a/vendor/github.com/gomarkdown/markdown/parser/block.go b/vendor/github.com/gomarkdown/markdown/parser/block.go
new file mode 100644
index 00000000..18a2dd89
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/parser/block.go
@@ -0,0 +1,1978 @@
+package parser
+
+import (
+	"bytes"
+	"html"
+	"regexp"
+	"strconv"
+	"unicode"
+
+	"github.com/gomarkdown/markdown/ast"
+)
+
+// Parsing block-level elements.
+
+const (
+	charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});"
+	escapable  = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]"
+)
+
+var (
+	reBackslashOrAmp      = regexp.MustCompile("[\\&]")
+	reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity)
+
+	// blockTags is a set of tags that are recognized as HTML block tags.
+	// Any of these can be included in markdown text without special escaping.
+	blockTags = map[string]struct{}{
+		"blockquote": struct{}{},
+		"del":        struct{}{},
+		"div":        struct{}{},
+		"dl":         struct{}{},
+		"fieldset":   struct{}{},
+		"form":       struct{}{},
+		"h1":         struct{}{},
+		"h2":         struct{}{},
+		"h3":         struct{}{},
+		"h4":         struct{}{},
+		"h5":         struct{}{},
+		"h6":         struct{}{},
+		"iframe":     struct{}{},
+		"ins":        struct{}{},
+		"math":       struct{}{},
+		"noscript":   struct{}{},
+		"ol":         struct{}{},
+		"pre":        struct{}{},
+		"p":          struct{}{},
+		"script":     struct{}{},
+		"style":      struct{}{},
+		"table":      struct{}{},
+		"ul":         struct{}{},
+
+		// HTML5
+		"address":    struct{}{},
+		"article":    struct{}{},
+		"aside":      struct{}{},
+		"canvas":     struct{}{},
+		"figcaption": struct{}{},
+		"figure":     struct{}{},
+		"footer":     struct{}{},
+		"header":     struct{}{},
+		"hgroup":     struct{}{},
+		"main":       struct{}{},
+		"nav":        struct{}{},
+		"output":     struct{}{},
+		"progress":   struct{}{},
+		"section":    struct{}{},
+		"video":      struct{}{},
+	}
+)
+
+// sanitizeAnchorName returns a sanitized anchor name for the given text.
+// Taken from https://github.com/shurcooL/sanitized_anchor_name/blob/master/main.go#L14:1
+func sanitizeAnchorName(text string) string {
+	var anchorName []rune
+	var futureDash = false
+	for _, r := range text {
+		switch {
+		case unicode.IsLetter(r) || unicode.IsNumber(r):
+			if futureDash && len(anchorName) > 0 {
+				anchorName = append(anchorName, '-')
+			}
+			futureDash = false
+			anchorName = append(anchorName, unicode.ToLower(r))
+		default:
+			futureDash = true
+		}
+	}
+	return string(anchorName)
+}
+
+// Parse block-level data.
+// Note: this function and many that it calls assume that
+// the input buffer ends with a newline.
+func (p *Parser) block(data []byte) {
+	// this is called recursively: enforce a maximum depth
+	if p.nesting >= p.maxNesting {
+		return
+	}
+	p.nesting++
+
+	// parse out one block-level construct at a time
+	for len(data) > 0 {
+		// attributes that can be specific before a block element:
+		//
+		// {#id .class1 .class2 key="value"}
+		if p.extensions&Attributes != 0 {
+			data = p.attribute(data)
+		}
+
+		if p.extensions&Includes != 0 {
+			f := p.readInclude
+			path, address, consumed := p.isInclude(data)
+			if consumed == 0 {
+				path, address, consumed = p.isCodeInclude(data)
+				f = p.readCodeInclude
+			}
+			if consumed > 0 {
+				included := f(p.includeStack.Last(), path, address)
+				p.includeStack.Push(path)
+				p.block(included)
+				p.includeStack.Pop()
+				data = data[consumed:]
+				continue
+			}
+		}
+
+		// user supplied parser function
+		if p.Opts.ParserHook != nil {
+			node, blockdata, consumed := p.Opts.ParserHook(data)
+			if consumed > 0 {
+				data = data[consumed:]
+
+				if node != nil {
+					p.addBlock(node)
+					if blockdata != nil {
+						p.block(blockdata)
+						p.finalize(node)
+					}
+				}
+				continue
+			}
+		}
+
+		// prefixed heading:
+		//
+		// # Heading 1
+		// ## Heading 2
+		// ...
+		// ###### Heading 6
+		if p.isPrefixHeading(data) {
+			data = data[p.prefixHeading(data):]
+			continue
+		}
+
+		// prefixed special heading:
+		// (there are no levels.)
+		//
+		// .# Abstract
+		if p.isPrefixSpecialHeading(data) {
+			data = data[p.prefixSpecialHeading(data):]
+			continue
+		}
+
+		// block of preformatted HTML:
+		//
+		// <div>
+		//     ...
+		// </div>
+		if data[0] == '<' {
+			if i := p.html(data, true); i > 0 {
+				data = data[i:]
+				continue
+			}
+		}
+
+		// title block
+		//
+		// % stuff
+		// % more stuff
+		// % even more stuff
+		if p.extensions&Titleblock != 0 {
+			if data[0] == '%' {
+				if i := p.titleBlock(data, true); i > 0 {
+					data = data[i:]
+					continue
+				}
+			}
+		}
+
+		// blank lines.  note: returns the # of bytes to skip
+		if i := p.isEmpty(data); i > 0 {
+			data = data[i:]
+			continue
+		}
+
+		// indented code block:
+		//
+		//     func max(a, b int) int {
+		//         if a > b {
+		//             return a
+		//         }
+		//         return b
+		//      }
+		if p.codePrefix(data) > 0 {
+			data = data[p.code(data):]
+			continue
+		}
+
+		// fenced code block:
+		//
+		// ``` go
+		// func fact(n int) int {
+		//     if n <= 1 {
+		//         return n
+		//     }
+		//     return n * fact(n-1)
+		// }
+		// ```
+		if p.extensions&FencedCode != 0 {
+			if i := p.fencedCodeBlock(data, true); i > 0 {
+				data = data[i:]
+				continue
+			}
+		}
+
+		// horizontal rule:
+		//
+		// ------
+		// or
+		// ******
+		// or
+		// ______
+		if p.isHRule(data) {
+			p.addBlock(&ast.HorizontalRule{})
+			i := skipUntilChar(data, 0, '\n')
+			data = data[i:]
+			continue
+		}
+
+		// block quote:
+		//
+		// > A big quote I found somewhere
+		// > on the web
+		if p.quotePrefix(data) > 0 {
+			data = data[p.quote(data):]
+			continue
+		}
+
+		// aside:
+		//
+		// A> The proof is too large to fit
+		// A> in the margin.
+		if p.extensions&Mmark != 0 {
+			if p.asidePrefix(data) > 0 {
+				data = data[p.aside(data):]
+				continue
+			}
+		}
+
+		// figure block:
+		//
+		// !---
+		// ![Alt Text](img.jpg "This is an image")
+		// ![Alt Text](img2.jpg "This is a second image")
+		// !---
+		if p.extensions&Mmark != 0 {
+			if i := p.figureBlock(data, true); i > 0 {
+				data = data[i:]
+				continue
+			}
+		}
+
+		// table:
+		//
+		// Name  | Age | Phone
+		// ------|-----|---------
+		// Bob   | 31  | 555-1234
+		// Alice | 27  | 555-4321
+		if p.extensions&Tables != 0 {
+			if i := p.table(data); i > 0 {
+				data = data[i:]
+				continue
+			}
+		}
+
+		// an itemized/unordered list:
+		//
+		// * Item 1
+		// * Item 2
+		//
+		// also works with + or -
+		if p.uliPrefix(data) > 0 {
+			data = data[p.list(data, 0, 0):]
+			continue
+		}
+
+		// a numbered/ordered list:
+		//
+		// 1. Item 1
+		// 2. Item 2
+		if i := p.oliPrefix(data); i > 0 {
+			start := 0
+			if i > 2 && p.extensions&OrderedListStart != 0 {
+				s := string(data[:i-2])
+				start, _ = strconv.Atoi(s)
+				if start == 1 {
+					start = 0
+				}
+			}
+			data = data[p.list(data, ast.ListTypeOrdered, start):]
+			continue
+		}
+
+		// definition lists:
+		//
+		// Term 1
+		// :   Definition a
+		// :   Definition b
+		//
+		// Term 2
+		// :   Definition c
+		if p.extensions&DefinitionLists != 0 {
+			if p.dliPrefix(data) > 0 {
+				data = data[p.list(data, ast.ListTypeDefinition, 0):]
+				continue
+			}
+		}
+
+		if p.extensions&MathJax != 0 {
+			if i := p.blockMath(data); i > 0 {
+				data = data[i:]
+				continue
+			}
+		}
+
+		// document matters:
+		//
+		// {frontmatter}/{mainmatter}/{backmatter}
+		if p.extensions&Mmark != 0 {
+			if i := p.documentMatter(data); i > 0 {
+				data = data[i:]
+				continue
+			}
+		}
+
+		// anything else must look like a normal paragraph
+		// note: this finds underlined headings, too
+		idx := p.paragraph(data)
+		data = data[idx:]
+	}
+
+	p.nesting--
+}
+
+func (p *Parser) addBlock(n ast.Node) ast.Node {
+	p.closeUnmatchedBlocks()
+
+	if p.attr != nil {
+		if c := n.AsContainer(); c != nil {
+			c.Attribute = p.attr
+		}
+		if l := n.AsLeaf(); l != nil {
+			l.Attribute = p.attr
+		}
+		p.attr = nil
+	}
+	return p.addChild(n)
+}
+
+func (p *Parser) isPrefixHeading(data []byte) bool {
+	if data[0] != '#' {
+		return false
+	}
+
+	if p.extensions&SpaceHeadings != 0 {
+		level := skipCharN(data, 0, '#', 6)
+		if level == len(data) || data[level] != ' ' {
+			return false
+		}
+	}
+	return true
+}
+
+func (p *Parser) prefixHeading(data []byte) int {
+	level := skipCharN(data, 0, '#', 6)
+	i := skipChar(data, level, ' ')
+	end := skipUntilChar(data, i, '\n')
+	skip := end
+	id := ""
+	if p.extensions&HeadingIDs != 0 {
+		j, k := 0, 0
+		// find start/end of heading id
+		for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ {
+		}
+		for k = j + 1; k < end && data[k] != '}'; k++ {
+		}
+		// extract heading id iff found
+		if j < end && k < end {
+			id = string(data[j+2 : k])
+			end = j
+			skip = k + 1
+			for end > 0 && data[end-1] == ' ' {
+				end--
+			}
+		}
+	}
+	for end > 0 && data[end-1] == '#' {
+		if isBackslashEscaped(data, end-1) {
+			break
+		}
+		end--
+	}
+	for end > 0 && data[end-1] == ' ' {
+		end--
+	}
+	if end > i {
+		if id == "" && p.extensions&AutoHeadingIDs != 0 {
+			id = sanitizeAnchorName(string(data[i:end]))
+		}
+		block := &ast.Heading{
+			HeadingID: id,
+			Level:     level,
+		}
+		block.Content = data[i:end]
+		p.addBlock(block)
+	}
+	return skip
+}
+
+func (p *Parser) isPrefixSpecialHeading(data []byte) bool {
+	if p.extensions|Mmark == 0 {
+		return false
+	}
+	if len(data) < 4 {
+		return false
+	}
+	if data[0] != '.' {
+		return false
+	}
+	if data[1] != '#' {
+		return false
+	}
+	if data[2] == '#' { // we don't support level, so nack this.
+		return false
+	}
+
+	if p.extensions&SpaceHeadings != 0 {
+		if data[2] != ' ' {
+			return false
+		}
+	}
+	return true
+}
+
+func (p *Parser) prefixSpecialHeading(data []byte) int {
+	i := skipChar(data, 2, ' ') // ".#" skipped
+	end := skipUntilChar(data, i, '\n')
+	skip := end
+	id := ""
+	if p.extensions&HeadingIDs != 0 {
+		j, k := 0, 0
+		// find start/end of heading id
+		for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ {
+		}
+		for k = j + 1; k < end && data[k] != '}'; k++ {
+		}
+		// extract heading id iff found
+		if j < end && k < end {
+			id = string(data[j+2 : k])
+			end = j
+			skip = k + 1
+			for end > 0 && data[end-1] == ' ' {
+				end--
+			}
+		}
+	}
+	for end > 0 && data[end-1] == '#' {
+		if isBackslashEscaped(data, end-1) {
+			break
+		}
+		end--
+	}
+	for end > 0 && data[end-1] == ' ' {
+		end--
+	}
+	if end > i {
+		if id == "" && p.extensions&AutoHeadingIDs != 0 {
+			id = sanitizeAnchorName(string(data[i:end]))
+		}
+		block := &ast.Heading{
+			HeadingID: id,
+			IsSpecial: true,
+			Level:     1, // always level 1.
+		}
+		block.Literal = data[i:end]
+		block.Content = data[i:end]
+		p.addBlock(block)
+	}
+	return skip
+}
+
+func (p *Parser) isUnderlinedHeading(data []byte) int {
+	// test of level 1 heading
+	if data[0] == '=' {
+		i := skipChar(data, 1, '=')
+		i = skipChar(data, i, ' ')
+		if i < len(data) && data[i] == '\n' {
+			return 1
+		}
+		return 0
+	}
+
+	// test of level 2 heading
+	if data[0] == '-' {
+		i := skipChar(data, 1, '-')
+		i = skipChar(data, i, ' ')
+		if i < len(data) && data[i] == '\n' {
+			return 2
+		}
+		return 0
+	}
+
+	return 0
+}
+
+func (p *Parser) titleBlock(data []byte, doRender bool) int {
+	if data[0] != '%' {
+		return 0
+	}
+	splitData := bytes.Split(data, []byte("\n"))
+	var i int
+	for idx, b := range splitData {
+		if !bytes.HasPrefix(b, []byte("%")) {
+			i = idx // - 1
+			break
+		}
+	}
+
+	data = bytes.Join(splitData[0:i], []byte("\n"))
+	consumed := len(data)
+	data = bytes.TrimPrefix(data, []byte("% "))
+	data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1)
+	block := &ast.Heading{
+		Level:        1,
+		IsTitleblock: true,
+	}
+	block.Content = data
+	p.addBlock(block)
+
+	return consumed
+}
+
+func (p *Parser) html(data []byte, doRender bool) int {
+	var i, j int
+
+	// identify the opening tag
+	if data[0] != '<' {
+		return 0
+	}
+	curtag, tagfound := p.htmlFindTag(data[1:])
+
+	// handle special cases
+	if !tagfound {
+		// check for an HTML comment
+		if size := p.htmlComment(data, doRender); size > 0 {
+			return size
+		}
+
+		// check for an <hr> tag
+		if size := p.htmlHr(data, doRender); size > 0 {
+			return size
+		}
+
+		// no special case recognized
+		return 0
+	}
+
+	// look for an unindented matching closing tag
+	// followed by a blank line
+	found := false
+	/*
+		closetag := []byte("\n</" + curtag + ">")
+		j = len(curtag) + 1
+		for !found {
+			// scan for a closing tag at the beginning of a line
+			if skip := bytes.Index(data[j:], closetag); skip >= 0 {
+				j += skip + len(closetag)
+			} else {
+				break
+			}
+
+			// see if it is the only thing on the line
+			if skip := p.isEmpty(data[j:]); skip > 0 {
+				// see if it is followed by a blank line/eof
+				j += skip
+				if j >= len(data) {
+					found = true
+					i = j
+				} else {
+					if skip := p.isEmpty(data[j:]); skip > 0 {
+						j += skip
+						found = true
+						i = j
+					}
+				}
+			}
+		}
+	*/
+
+	// if not found, try a second pass looking for indented match
+	// but not if tag is "ins" or "del" (following original Markdown.pl)
+	if !found && curtag != "ins" && curtag != "del" {
+		i = 1
+		for i < len(data) {
+			i++
+			for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
+				i++
+			}
+
+			if i+2+len(curtag) >= len(data) {
+				break
+			}
+
+			j = p.htmlFindEnd(curtag, data[i-1:])
+
+			if j > 0 {
+				i += j - 1
+				found = true
+				break
+			}
+		}
+	}
+
+	if !found {
+		return 0
+	}
+
+	// the end of the block has been found
+	if doRender {
+		// trim newlines
+		end := backChar(data, i, '\n')
+		htmlBLock := &ast.HTMLBlock{ast.Leaf{Content: data[:end]}}
+		p.addBlock(htmlBLock)
+		finalizeHTMLBlock(htmlBLock)
+	}
+
+	return i
+}
+
+func finalizeHTMLBlock(block *ast.HTMLBlock) {
+	block.Literal = block.Content
+	block.Content = nil
+}
+
+// HTML comment, lax form
+func (p *Parser) htmlComment(data []byte, doRender bool) int {
+	i := p.inlineHTMLComment(data)
+	// needs to end with a blank line
+	if j := p.isEmpty(data[i:]); j > 0 {
+		size := i + j
+		if doRender {
+			// trim trailing newlines
+			end := backChar(data, size, '\n')
+			htmlBLock := &ast.HTMLBlock{ast.Leaf{Content: data[:end]}}
+			p.addBlock(htmlBLock)
+			finalizeHTMLBlock(htmlBLock)
+		}
+		return size
+	}
+	return 0
+}
+
+// HR, which is the only self-closing block tag considered
+func (p *Parser) htmlHr(data []byte, doRender bool) int {
+	if len(data) < 4 {
+		return 0
+	}
+	if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
+		return 0
+	}
+	if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
+		// not an <hr> tag after all; at least not a valid one
+		return 0
+	}
+	i := 3
+	for i < len(data) && data[i] != '>' && data[i] != '\n' {
+		i++
+	}
+	if i < len(data) && data[i] == '>' {
+		i++
+		if j := p.isEmpty(data[i:]); j > 0 {
+			size := i + j
+			if doRender {
+				// trim newlines
+				end := backChar(data, size, '\n')
+				htmlBlock := &ast.HTMLBlock{ast.Leaf{Content: data[:end]}}
+				p.addBlock(htmlBlock)
+				finalizeHTMLBlock(htmlBlock)
+			}
+			return size
+		}
+	}
+	return 0
+}
+
+func (p *Parser) htmlFindTag(data []byte) (string, bool) {
+	i := skipAlnum(data, 0)
+	key := string(data[:i])
+	if _, ok := blockTags[key]; ok {
+		return key, true
+	}
+	return "", false
+}
+
+func (p *Parser) htmlFindEnd(tag string, data []byte) int {
+	// assume data[0] == '<' && data[1] == '/' already tested
+	if tag == "hr" {
+		return 2
+	}
+	// check if tag is a match
+	closetag := []byte("</" + tag + ">")
+	if !bytes.HasPrefix(data, closetag) {
+		return 0
+	}
+	i := len(closetag)
+
+	// check that the rest of the line is blank
+	skip := 0
+	if skip = p.isEmpty(data[i:]); skip == 0 {
+		return 0
+	}
+	i += skip
+	skip = 0
+
+	if i >= len(data) {
+		return i
+	}
+
+	if p.extensions&LaxHTMLBlocks != 0 {
+		return i
+	}
+	if skip = p.isEmpty(data[i:]); skip == 0 {
+		// following line must be blank
+		return 0
+	}
+
+	return i + skip
+}
+
+func (*Parser) isEmpty(data []byte) int {
+	// it is okay to call isEmpty on an empty buffer
+	if len(data) == 0 {
+		return 0
+	}
+
+	var i int
+	for i = 0; i < len(data) && data[i] != '\n'; i++ {
+		if data[i] != ' ' && data[i] != '\t' {
+			return 0
+		}
+	}
+	i = skipCharN(data, i, '\n', 1)
+	return i
+}
+
+func (*Parser) isHRule(data []byte) bool {
+	i := 0
+
+	// skip up to three spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// look at the hrule char
+	if data[i] != '*' && data[i] != '-' && data[i] != '_' {
+		return false
+	}
+	c := data[i]
+
+	// the whole line must be the char or whitespace
+	n := 0
+	for i < len(data) && data[i] != '\n' {
+		switch {
+		case data[i] == c:
+			n++
+		case data[i] != ' ':
+			return false
+		}
+		i++
+	}
+
+	return n >= 3
+}
+
+// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data,
+// and returns the end index if so, or 0 otherwise. It also returns the marker found.
+// If syntax is not nil, it gets set to the syntax specified in the fence line.
+func isFenceLine(data []byte, syntax *string, oldmarker string) (end int, marker string) {
+	i, size := 0, 0
+
+	n := len(data)
+	// skip up to three spaces
+	for i < n && i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// check for the marker characters: ~ or `
+	if i >= n {
+		return 0, ""
+	}
+	if data[i] != '~' && data[i] != '`' {
+		return 0, ""
+	}
+
+	c := data[i]
+
+	// the whole line must be the same char or whitespace
+	for i < n && data[i] == c {
+		size++
+		i++
+	}
+
+	// the marker char must occur at least 3 times
+	if size < 3 {
+		return 0, ""
+	}
+	marker = string(data[i-size : i])
+
+	// if this is the end marker, it must match the beginning marker
+	if oldmarker != "" && marker != oldmarker {
+		return 0, ""
+	}
+
+	// TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here
+	// into one, always get the syntax, and discard it if the caller doesn't care.
+	if syntax != nil {
+		syn := 0
+		i = skipChar(data, i, ' ')
+
+		if i >= n {
+			if i == n {
+				return i, marker
+			}
+			return 0, ""
+		}
+
+		syntaxStart := i
+
+		if data[i] == '{' {
+			i++
+			syntaxStart++
+
+			for i < n && data[i] != '}' && data[i] != '\n' {
+				syn++
+				i++
+			}
+
+			if i >= n || data[i] != '}' {
+				return 0, ""
+			}
+
+			// strip all whitespace at the beginning and the end
+			// of the {} block
+			for syn > 0 && isSpace(data[syntaxStart]) {
+				syntaxStart++
+				syn--
+			}
+
+			for syn > 0 && isSpace(data[syntaxStart+syn-1]) {
+				syn--
+			}
+
+			i++
+		} else {
+			for i < n && !isSpace(data[i]) {
+				syn++
+				i++
+			}
+		}
+
+		*syntax = string(data[syntaxStart : syntaxStart+syn])
+	}
+
+	i = skipChar(data, i, ' ')
+	if i >= n || data[i] != '\n' {
+		if i == n {
+			return i, marker
+		}
+		return 0, ""
+	}
+	return i + 1, marker // Take newline into account.
+}
+
+// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning,
+// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects.
+// If doRender is true, a final newline is mandatory to recognize the fenced code block.
+func (p *Parser) fencedCodeBlock(data []byte, doRender bool) int {
+	var syntax string
+	beg, marker := isFenceLine(data, &syntax, "")
+	if beg == 0 || beg >= len(data) {
+		return 0
+	}
+
+	var work bytes.Buffer
+	work.WriteString(syntax)
+	work.WriteByte('\n')
+
+	for {
+		// safe to assume beg < len(data)
+
+		// check for the end of the code block
+		fenceEnd, _ := isFenceLine(data[beg:], nil, marker)
+		if fenceEnd != 0 {
+			beg += fenceEnd
+			break
+		}
+
+		// copy the current line
+		end := skipUntilChar(data, beg, '\n') + 1
+
+		// did we reach the end of the buffer without a closing marker?
+		if end >= len(data) {
+			return 0
+		}
+
+		// verbatim copy to the working buffer
+		if doRender {
+			work.Write(data[beg:end])
+		}
+		beg = end
+	}
+
+	if doRender {
+		codeBlock := &ast.CodeBlock{
+			IsFenced: true,
+		}
+		codeBlock.Content = work.Bytes() // TODO: get rid of temp buffer
+
+		if p.extensions&Mmark == 0 {
+			p.addBlock(codeBlock)
+			finalizeCodeBlock(codeBlock)
+			return beg
+		}
+
+		// Check for caption and if found make it a figure.
+		if captionContent, id, consumed := p.caption(data[beg:], []byte("Figure: ")); consumed > 0 {
+			figure := &ast.CaptionFigure{}
+			caption := &ast.Caption{}
+			figure.HeadingID = id
+			p.Inline(caption, captionContent)
+
+			p.addBlock(figure)
+			codeBlock.AsLeaf().Attribute = figure.AsContainer().Attribute
+			p.addChild(codeBlock)
+			finalizeCodeBlock(codeBlock)
+			p.addChild(caption)
+			p.finalize(figure)
+
+			beg += consumed
+
+			return beg
+		}
+
+		// Still here, normal block
+		p.addBlock(codeBlock)
+		finalizeCodeBlock(codeBlock)
+	}
+
+	return beg
+}
+
+func unescapeChar(str []byte) []byte {
+	if str[0] == '\\' {
+		return []byte{str[1]}
+	}
+	return []byte(html.UnescapeString(string(str)))
+}
+
+func unescapeString(str []byte) []byte {
+	if reBackslashOrAmp.Match(str) {
+		return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar)
+	}
+	return str
+}
+
+func finalizeCodeBlock(code *ast.CodeBlock) {
+	c := code.Content
+	if code.IsFenced {
+		newlinePos := bytes.IndexByte(c, '\n')
+		firstLine := c[:newlinePos]
+		rest := c[newlinePos+1:]
+		code.Info = unescapeString(bytes.Trim(firstLine, "\n"))
+		code.Literal = rest
+	} else {
+		code.Literal = c
+	}
+	code.Content = nil
+}
+
+func (p *Parser) table(data []byte) int {
+	i, columns, table := p.tableHeader(data)
+	if i == 0 {
+		return 0
+	}
+
+	p.addBlock(&ast.TableBody{})
+
+	for i < len(data) {
+		pipes, rowStart := 0, i
+		for ; i < len(data) && data[i] != '\n'; i++ {
+			if data[i] == '|' {
+				pipes++
+			}
+		}
+
+		if pipes == 0 {
+			i = rowStart
+			break
+		}
+
+		// include the newline in data sent to tableRow
+		i = skipCharN(data, i, '\n', 1)
+
+		if p.tableFooter(data[rowStart:i]) {
+			continue
+		}
+
+		p.tableRow(data[rowStart:i], columns, false)
+	}
+	if captionContent, id, consumed := p.caption(data[i:], []byte("Table: ")); consumed > 0 {
+		caption := &ast.Caption{}
+		p.Inline(caption, captionContent)
+
+		// Some switcheroo to re-insert the parsed table as a child of the captionfigure.
+		figure := &ast.CaptionFigure{}
+		figure.HeadingID = id
+		table2 := &ast.Table{}
+		// Retain any block level attributes.
+		table2.AsContainer().Attribute = table.AsContainer().Attribute
+		children := table.GetChildren()
+		ast.RemoveFromTree(table)
+
+		table2.SetChildren(children)
+		ast.AppendChild(figure, table2)
+		ast.AppendChild(figure, caption)
+
+		p.addChild(figure)
+		p.finalize(figure)
+
+		i += consumed
+	}
+
+	return i
+}
+
+// check if the specified position is preceded by an odd number of backslashes
+func isBackslashEscaped(data []byte, i int) bool {
+	backslashes := 0
+	for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' {
+		backslashes++
+	}
+	return backslashes&1 == 1
+}
+
+// tableHeader parses the header. If recognized it will also add a table.
+func (p *Parser) tableHeader(data []byte) (size int, columns []ast.CellAlignFlags, table ast.Node) {
+	i := 0
+	colCount := 1
+	for i = 0; i < len(data) && data[i] != '\n'; i++ {
+		if data[i] == '|' && !isBackslashEscaped(data, i) {
+			colCount++
+		}
+	}
+
+	// doesn't look like a table header
+	if colCount == 1 {
+		return
+	}
+
+	// include the newline in the data sent to tableRow
+	j := skipCharN(data, i, '\n', 1)
+	header := data[:j]
+
+	// column count ignores pipes at beginning or end of line
+	if data[0] == '|' {
+		colCount--
+	}
+	if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) {
+		colCount--
+	}
+
+	columns = make([]ast.CellAlignFlags, colCount)
+
+	// move on to the header underline
+	i++
+	if i >= len(data) {
+		return
+	}
+
+	if data[i] == '|' && !isBackslashEscaped(data, i) {
+		i++
+	}
+	i = skipChar(data, i, ' ')
+
+	// each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3
+	// and trailing | optional on last column
+	col := 0
+	n := len(data)
+	for i < n && data[i] != '\n' {
+		dashes := 0
+
+		if data[i] == ':' {
+			i++
+			columns[col] |= ast.TableAlignmentLeft
+			dashes++
+		}
+		for i < n && data[i] == '-' {
+			i++
+			dashes++
+		}
+		if i < n && data[i] == ':' {
+			i++
+			columns[col] |= ast.TableAlignmentRight
+			dashes++
+		}
+		for i < n && data[i] == ' ' {
+			i++
+		}
+		if i == n {
+			return
+		}
+		// end of column test is messy
+		switch {
+		case dashes < 3:
+			// not a valid column
+			return
+
+		case data[i] == '|' && !isBackslashEscaped(data, i):
+			// marker found, now skip past trailing whitespace
+			col++
+			i++
+			for i < n && data[i] == ' ' {
+				i++
+			}
+
+			// trailing junk found after last column
+			if col >= colCount && i < len(data) && data[i] != '\n' {
+				return
+			}
+
+		case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount:
+			// something else found where marker was required
+			return
+
+		case data[i] == '\n':
+			// marker is optional for the last column
+			col++
+
+		default:
+			// trailing junk found after last column
+			return
+		}
+	}
+	if col != colCount {
+		return
+	}
+
+	table = &ast.Table{}
+	p.addBlock(table)
+	p.addBlock(&ast.TableHeader{})
+	p.tableRow(header, columns, true)
+	size = skipCharN(data, i, '\n', 1)
+	return
+}
+
// tableRow parses one table row in data and appends a TableRow node holding
// one TableCell per entry in columns. Missing cells are padded with empty
// ones; cells beyond len(columns) are silently dropped.
func (p *Parser) tableRow(data []byte, columns []ast.CellAlignFlags, header bool) {
	p.addBlock(&ast.TableRow{})
	i, col := 0, 0

	// skip an optional leading pipe
	if data[i] == '|' && !isBackslashEscaped(data, i) {
		i++
	}

	n := len(data)
	for col = 0; col < len(columns) && i < n; col++ {
		// skip leading spaces inside the cell
		for i < n && data[i] == ' ' {
			i++
		}

		cellStart := i

		// scan up to the next unescaped pipe or end of line
		for i < n && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' {
			i++
		}

		cellEnd := i

		// skip the end-of-cell marker, possibly taking us past end of buffer
		i++

		// trim trailing spaces from the cell content
		for cellEnd > cellStart && cellEnd-1 < n && data[cellEnd-1] == ' ' {
			cellEnd--
		}

		block := &ast.TableCell{
			IsHeader: header,
			Align:    columns[col],
		}
		block.Content = data[cellStart:cellEnd]
		p.addBlock(block)
	}

	// pad it out with empty columns to get the right number
	for ; col < len(columns); col++ {
		block := &ast.TableCell{
			IsHeader: header,
			Align:    columns[col],
		}
		p.addBlock(block)
	}

	// silently ignore rows with too many cells
}
+
// tableFooter parses the (optional) table footer: a line consisting solely
// of '=' fill characters and unescaped '|' separators. It appends a
// TableFooter node and returns true on a match, false otherwise.
func (p *Parser) tableFooter(data []byte) bool {
	colCount := 1
	// scan the first line: count separators, reject anything that is not '='
	for i := 0; i < len(data) && data[i] != '\n'; i++ {
		if data[i] == '|' && !isBackslashEscaped(data, i) {
			colCount++
			continue
		}
		// remaining data must be the = character
		if data[i] != '=' {
			return false
		}
	}

	// doesn't look like a table footer
	if colCount == 1 {
		return false
	}

	p.addBlock(&ast.TableFooter{})

	return true
}
+
+// returns blockquote prefix length
+func (p *Parser) quotePrefix(data []byte) int {
+	i := 0
+	n := len(data)
+	for i < 3 && i < n && data[i] == ' ' {
+		i++
+	}
+	if i < n && data[i] == '>' {
+		if i+1 < n && data[i+1] == ' ' {
+			return i + 2
+		}
+		return i + 1
+	}
+	return 0
+}
+
// terminateBlockquote reports whether the blockquote ends here: the current
// line (starting at beg) must be blank, and the following line (starting at
// end) must be neither blockquote-prefixed nor blank.
func (p *Parser) terminateBlockquote(data []byte, beg, end int) bool {
	if p.isEmpty(data[beg:]) <= 0 {
		return false
	}
	if end >= len(data) {
		return true
	}
	return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
}
+
// quote parses a blockquote fragment at the start of data and returns the
// number of bytes consumed. Prefix-stripped lines are collected and parsed
// recursively as block content. With the Mmark extension, a following
// "Quote: ..." caption wraps the blockquote in a caption figure.
func (p *Parser) quote(data []byte) int {
	var raw bytes.Buffer
	beg, end := 0, 0
	for beg < len(data) {
		end = beg
		// Step over whole lines, collecting them. While doing that, check for
		// fenced code and if one's found, incorporate it altogether,
		// regardless of any contents inside it
		for end < len(data) && data[end] != '\n' {
			if p.extensions&FencedCode != 0 {
				if i := p.fencedCodeBlock(data[end:], false); i > 0 {
					// -1 to compensate for the extra end++ after the loop:
					end += i - 1
					break
				}
			}
			end++
		}
		end = skipCharN(data, end, '\n', 1)
		if pre := p.quotePrefix(data[beg:]); pre > 0 {
			// skip the prefix
			beg += pre
		} else if p.terminateBlockquote(data, beg, end) {
			break
		}
		// this line is part of the blockquote
		raw.Write(data[beg:end])
		beg = end
	}

	// without Mmark there is never a caption: emit the blockquote directly
	if p.extensions&Mmark == 0 {
		block := p.addBlock(&ast.BlockQuote{})
		p.block(raw.Bytes())
		p.finalize(block)
		return end
	}

	if captionContent, id, consumed := p.caption(data[end:], []byte("Quote: ")); consumed > 0 {
		figure := &ast.CaptionFigure{}
		caption := &ast.Caption{}
		figure.HeadingID = id
		p.Inline(caption, captionContent)

		p.addBlock(figure) // this discards any attributes
		block := &ast.BlockQuote{}
		block.AsContainer().Attribute = figure.AsContainer().Attribute
		p.addChild(block)
		p.block(raw.Bytes())
		p.finalize(block)

		p.addChild(caption)
		p.finalize(figure)

		end += consumed

		return end
	}

	block := p.addBlock(&ast.BlockQuote{})
	p.block(raw.Bytes())
	p.finalize(block)

	return end
}
+
+// returns prefix length for block code
+func (p *Parser) codePrefix(data []byte) int {
+	n := len(data)
+	if n >= 1 && data[0] == '\t' {
+		return 1
+	}
+	if n >= 4 && data[3] == ' ' && data[2] == ' ' && data[1] == ' ' && data[0] == ' ' {
+		return 4
+	}
+	return 0
+}
+
// code parses an indented code block (lines prefixed by a tab or four
// spaces) at the start of data and returns the number of bytes consumed.
// Blank lines are kept inside the block; the first non-blank, non-prefixed
// line ends it. Trailing blank lines are trimmed from the emitted content.
func (p *Parser) code(data []byte) int {
	var work bytes.Buffer

	i := 0
	for i < len(data) {
		beg := i

		// advance to the start of the next line
		i = skipUntilChar(data, i, '\n')
		i = skipCharN(data, i, '\n', 1)

		blankline := p.isEmpty(data[beg:i]) > 0
		if pre := p.codePrefix(data[beg:i]); pre > 0 {
			beg += pre
		} else if !blankline {
			// non-empty, non-prefixed line breaks the pre
			i = beg
			break
		}

		// verbatim copy to the working buffer
		if blankline {
			work.WriteByte('\n')
		} else {
			work.Write(data[beg:i])
		}
	}

	// trim all the \n off the end of work
	workbytes := work.Bytes()

	eol := backChar(workbytes, len(workbytes), '\n')

	if eol != len(workbytes) {
		work.Truncate(eol)
	}

	// always terminate the content with exactly one newline
	work.WriteByte('\n')

	codeBlock := &ast.CodeBlock{
		IsFenced: false,
	}
	// TODO: get rid of temp buffer
	codeBlock.Content = work.Bytes()
	p.addBlock(codeBlock)
	finalizeCodeBlock(codeBlock)

	return i
}
+
+// returns unordered list item prefix
+func (p *Parser) uliPrefix(data []byte) int {
+	// start with up to 3 spaces
+	i := skipCharN(data, 0, ' ', 3)
+
+	if i >= len(data)-1 {
+		return 0
+	}
+	// need one of {'*', '+', '-'} followed by a space or a tab
+	if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
+		(data[i+1] != ' ' && data[i+1] != '\t') {
+		return 0
+	}
+	return i + 2
+}
+
+// returns ordered list item prefix
+func (p *Parser) oliPrefix(data []byte) int {
+	// start with up to 3 spaces
+	i := skipCharN(data, 0, ' ', 3)
+
+	// count the digits
+	start := i
+	for i < len(data) && data[i] >= '0' && data[i] <= '9' {
+		i++
+	}
+	if start == i || i >= len(data)-1 {
+		return 0
+	}
+
+	// we need >= 1 digits followed by a dot and a space or a tab
+	if data[i] != '.' || !(data[i+1] == ' ' || data[i+1] == '\t') {
+		return 0
+	}
+	return i + 2
+}
+
// dliPrefix returns the length of a definition list item prefix at the
// start of data — a ':' followed by a space or tab — or 0 when absent.
func (p *Parser) dliPrefix(data []byte) int {
	if len(data) < 2 {
		return 0
	}
	// need a ':' followed by a space or a tab
	if data[0] != ':' || !(data[1] == ' ' || data[1] == '\t') {
		return 0
	}
	// NOTE(review): data[0] is known to be ':' here, so this skip never
	// advances and i is always 0; the returned length is therefore always 2.
	i := skipChar(data, 0, ' ')
	return i + 2
}
+
// list parses an ordered, unordered or definition list block at the start
// of data and returns the number of bytes consumed. flags selects the list
// type; start is the first number of an ordered list (0 otherwise).
func (p *Parser) list(data []byte, flags ast.ListType, start int) int {
	i := 0
	flags |= ast.ListItemBeginningOfList
	list := &ast.List{
		ListFlags: flags,
		Tight:     true,
		Start:     start,
	}
	block := p.addBlock(list)

	// consume one item at a time until listItem reports the end of the list
	for i < len(data) {
		skip := p.listItem(data[i:], &flags)
		if flags&ast.ListItemContainsBlock != 0 {
			// any item containing a block makes the whole list loose
			list.Tight = false
		}
		i += skip
		if skip == 0 || flags&ast.ListItemEndOfList != 0 {
			break
		}
		flags &= ^ast.ListItemBeginningOfList
	}

	above := block.GetParent()
	finalizeList(list)
	p.tip = above
	return i
}
+
+// Returns true if the list item is not the same type as its parent list
+func (p *Parser) listTypeChanged(data []byte, flags *ast.ListType) bool {
+	if p.dliPrefix(data) > 0 && *flags&ast.ListTypeDefinition == 0 {
+		return true
+	} else if p.oliPrefix(data) > 0 && *flags&ast.ListTypeOrdered == 0 {
+		return true
+	} else if p.uliPrefix(data) > 0 && (*flags&ast.ListTypeOrdered != 0 || *flags&ast.ListTypeDefinition != 0) {
+		return true
+	}
+	return false
+}
+
// endsWithBlankLine reports whether block ends with a blank line, descending
// if needed into lists and sublists. Currently it always returns false: the
// per-block "last line blank" information is not tracked yet (see TODO).
func endsWithBlankLine(block ast.Node) bool {
	// TODO: figure this out. Always false now.
	for block != nil {
		//if block.lastLineBlank {
		//return true
		//}
		switch block.(type) {
		case *ast.List, *ast.ListItem:
			// descend into the last child of (sub)lists
			block = ast.GetLastChild(block)
		default:
			return false
		}
	}
	return false
}
+
// finalizeList decides, once all items are parsed, whether the list is
// tight or loose: a blank line between items, or between the blocks inside
// an item, makes it loose. (endsWithBlankLine currently always returns
// false, so this never flips Tight in practice.)
func finalizeList(list *ast.List) {
	items := list.Parent.GetChildren()
	lastItemIdx := len(items) - 1
	for i, item := range items {
		isLastItem := i == lastItemIdx
		// check for non-final list item ending with blank line:
		if !isLastItem && endsWithBlankLine(item) {
			list.Tight = false
			break
		}
		// recurse into children of list item, to see if there are spaces
		// between any of them:
		// NOTE(review): this reads item.GetParent().GetChildren() — the
		// item's siblings, not the item's own children. Confirm intended.
		subItems := item.GetParent().GetChildren()
		lastSubItemIdx := len(subItems) - 1
		for j, subItem := range subItems {
			isLastSubItem := j == lastSubItemIdx
			if (!isLastItem || !isLastSubItem) && endsWithBlankLine(subItem) {
				list.Tight = false
				break
			}
		}
	}
}
+
// listItem parses a single list item at the start of data and returns the
// number of bytes consumed. It assumes the initial prefix is already removed
// if this is a sublist. flags is updated in place to report item state back
// to the caller (end of list, contains a block, definition term).
func (p *Parser) listItem(data []byte, flags *ast.ListType) int {
	// keep track of the indentation of the first line
	itemIndent := 0
	if data[0] == '\t' {
		itemIndent += 4
	} else {
		for itemIndent < 3 && data[itemIndent] == ' ' {
			itemIndent++
		}
	}

	// determine the item kind by the prefix that matches
	var bulletChar byte = '*'
	i := p.uliPrefix(data)
	if i == 0 {
		i = p.oliPrefix(data)
	} else {
		bulletChar = data[i-2]
	}
	if i == 0 {
		i = p.dliPrefix(data)
		// reset definition term flag
		if i > 0 {
			*flags &= ^ast.ListTypeTerm
		}
	}
	if i == 0 {
		// if in definition list, set term flag and continue
		if *flags&ast.ListTypeDefinition != 0 {
			*flags |= ast.ListTypeTerm
		} else {
			return 0
		}
	}

	// skip leading whitespace on first line
	i = skipChar(data, i, ' ')

	// find the end of the line
	line := i
	for i > 0 && i < len(data) && data[i-1] != '\n' {
		i++
	}

	// get working buffer
	var raw bytes.Buffer

	// put the first line into the working buffer
	raw.Write(data[line:i])
	line = i

	// process the following lines
	containsBlankLine := false
	sublist := 0 // offset in raw where a nested sublist starts, 0 if none

gatherlines:
	for line < len(data) {
		i++

		// find the end of this line
		for i < len(data) && data[i-1] != '\n' {
			i++
		}

		// if it is an empty line, guess that it is part of this item
		// and move on to the next line
		if p.isEmpty(data[line:i]) > 0 {
			containsBlankLine = true
			line = i
			continue
		}

		// calculate the indentation
		indent := 0
		indentIndex := 0
		if data[line] == '\t' {
			indentIndex++
			indent += 4
		} else {
			for indent < 4 && line+indent < i && data[line+indent] == ' ' {
				indent++
				indentIndex++
			}
		}

		chunk := data[line+indentIndex : i]

		// evaluate how this line fits in
		switch {
		// is this a nested list item?
		case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || p.oliPrefix(chunk) > 0 || p.dliPrefix(chunk) > 0:

			// to be a nested list, it must be indented more
			// if not, it is either a different kind of list
			// or the next item in the same list
			if indent <= itemIndent {
				if p.listTypeChanged(chunk, flags) {
					*flags |= ast.ListItemEndOfList
				} else if containsBlankLine {
					*flags |= ast.ListItemContainsBlock
				}

				break gatherlines
			}

			if containsBlankLine {
				*flags |= ast.ListItemContainsBlock
			}

			// is this the first item in the nested list?
			if sublist == 0 {
				sublist = raw.Len()
				// in the case of dliPrefix we are too late and need to search back for the definition item, which
				// should be on the previous line, we then adjust sublist to start there.
				if p.dliPrefix(chunk) > 0 {
					sublist = backUntilChar(raw.Bytes(), raw.Len()-1, '\n')
				}
			}

			// is this a nested prefix heading?
		case p.isPrefixHeading(chunk), p.isPrefixSpecialHeading(chunk):
			// if the heading is not indented, it is not nested in the list
			// and thus ends the list
			if containsBlankLine && indent < 4 {
				*flags |= ast.ListItemEndOfList
				break gatherlines
			}
			*flags |= ast.ListItemContainsBlock

		// anything following an empty line is only part
		// of this item if it is indented 4 spaces
		// (regardless of the indentation of the beginning of the item)
		case containsBlankLine && indent < 4:
			if *flags&ast.ListTypeDefinition != 0 && i < len(data)-1 {
				// is the next item still a part of this list?
				next := i
				for next < len(data) && data[next] != '\n' {
					next++
				}
				for next < len(data)-1 && data[next] == '\n' {
					next++
				}
				if i < len(data)-1 && data[i] != ':' && next < len(data)-1 && data[next] != ':' {
					*flags |= ast.ListItemEndOfList
				}
			} else {
				*flags |= ast.ListItemEndOfList
			}
			break gatherlines

		// a blank line means this should be parsed as a block
		case containsBlankLine:
			raw.WriteByte('\n')
			*flags |= ast.ListItemContainsBlock
		}

		// if this line was preceded by one or more blanks,
		// re-introduce the blank into the buffer
		if containsBlankLine {
			containsBlankLine = false
			raw.WriteByte('\n')
		}

		// add the line into the working buffer without prefix
		raw.Write(data[line+indentIndex : i])

		line = i
	}

	rawBytes := raw.Bytes()

	listItem := &ast.ListItem{
		ListFlags:  *flags,
		Tight:      false,
		BulletChar: bulletChar,
		Delimiter:  '.', // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark
	}
	p.addBlock(listItem)

	// render the contents of the list item
	if *flags&ast.ListItemContainsBlock != 0 && *flags&ast.ListTypeTerm == 0 {
		// intermediate render of block item, except for definition term
		if sublist > 0 {
			// parse the part before the sublist and the sublist separately
			p.block(rawBytes[:sublist])
			p.block(rawBytes[sublist:])
		} else {
			p.block(rawBytes)
		}
	} else {
		// intermediate render of inline item
		para := &ast.Paragraph{}
		if sublist > 0 {
			para.Content = rawBytes[:sublist]
		} else {
			para.Content = rawBytes
		}
		p.addChild(para)
		if sublist > 0 {
			p.block(rawBytes[sublist:])
		}
	}
	return line
}
+
+// render a single paragraph that has already been parsed out
+func (p *Parser) renderParagraph(data []byte) {
+	if len(data) == 0 {
+		return
+	}
+
+	// trim leading spaces
+	beg := skipChar(data, 0, ' ')
+
+	end := len(data)
+	// trim trailing newline
+	if data[len(data)-1] == '\n' {
+		end--
+	}
+
+	// trim trailing spaces
+	for end > beg && data[end-1] == ' ' {
+		end--
+	}
+	para := &ast.Paragraph{}
+	para.Content = data[beg:end]
+	p.addBlock(para)
+}
+
+// blockMath handle block surround with $$
+func (p *Parser) blockMath(data []byte) int {
+	if len(data) <= 4 || data[0] != '$' || data[1] != '$' || data[2] == '$' {
+		return 0
+	}
+
+	// find next $$
+	var end int
+	for end = 2; end+1 < len(data) && (data[end] != '$' || data[end+1] != '$'); end++ {
+	}
+
+	// $$ not match
+	if end+1 == len(data) {
+		return 0
+	}
+
+	// render the display math
+	mathBlock := &ast.MathBlock{}
+	mathBlock.Literal = data[2:end]
+	p.addBlock(mathBlock)
+
+	return end + 2
+}
+
// paragraph parses a paragraph at the start of data, scanning line by line
// until something terminates it (a blank line, a reference, an underlined
// heading, a fenced code block, a list item, ...). It returns the number of
// bytes consumed.
func (p *Parser) paragraph(data []byte) int {
	// prev: index of 1st char of previous line
	// line: index of 1st char of current line
	// i: index of cursor/end of current line
	var prev, line, i int
	tabSize := tabSizeDefault
	if p.extensions&TabSizeEight != 0 {
		tabSize = tabSizeDouble
	}
	// keep going until we find something to mark the end of the paragraph
	for i < len(data) {
		// mark the beginning of the current line
		prev = line
		current := data[i:]
		line = i

		// did we find a reference or a footnote? If so, end a paragraph
		// preceding it and report that we have consumed up to the end of that
		// reference:
		if refEnd := isReference(p, current, tabSize); refEnd > 0 {
			p.renderParagraph(data[:i])
			return i + refEnd
		}

		// did we find a blank line marking the end of the paragraph?
		if n := p.isEmpty(current); n > 0 {
			// is this blank line followed by a definition list item?
			if p.extensions&DefinitionLists != 0 {
				if i < len(data)-1 && data[i+1] == ':' {
					listLen := p.list(data[prev:], ast.ListTypeDefinition, 0)
					return prev + listLen
				}
			}

			p.renderParagraph(data[:i])
			return i + n
		}

		// an underline under some text marks a heading, so our paragraph ended on prev line
		if i > 0 {
			if level := p.isUnderlinedHeading(current); level > 0 {
				// render the paragraph
				p.renderParagraph(data[:prev])

				// ignore leading and trailing whitespace
				eol := i - 1
				for prev < eol && data[prev] == ' ' {
					prev++
				}
				for eol > prev && data[eol-1] == ' ' {
					eol--
				}

				id := ""
				if p.extensions&AutoHeadingIDs != 0 {
					id = sanitizeAnchorName(string(data[prev:eol]))
				}

				block := &ast.Heading{
					Level:     level,
					HeadingID: id,
				}
				block.Content = data[prev:eol]
				p.addBlock(block)

				// find the end of the underline
				return skipUntilChar(data, i, '\n')
			}
		}

		// if the next line starts a block of HTML, then the paragraph ends here
		if p.extensions&LaxHTMLBlocks != 0 {
			if data[i] == '<' && p.html(current, false) > 0 {
				// rewind to before the HTML block
				p.renderParagraph(data[:i])
				return i
			}
		}

		// if there's a prefixed heading or a horizontal rule after this, paragraph is over
		if p.isPrefixHeading(current) || p.isPrefixSpecialHeading(current) || p.isHRule(current) {
			p.renderParagraph(data[:i])
			return i
		}

		// if there's a fenced code block, paragraph is over
		if p.extensions&FencedCode != 0 {
			if p.fencedCodeBlock(current, false) > 0 {
				p.renderParagraph(data[:i])
				return i
			}
		}

		// if there's a figure block, paragraph is over
		if p.extensions&Mmark != 0 {
			if p.figureBlock(current, false) > 0 {
				p.renderParagraph(data[:i])
				return i
			}
		}

		// if there's a definition list item, prev line is a definition term
		if p.extensions&DefinitionLists != 0 {
			if p.dliPrefix(current) != 0 {
				ret := p.list(data[prev:], ast.ListTypeDefinition, 0)
				return ret + prev
			}
		}

		// if there's a list after this, paragraph is over
		if p.extensions&NoEmptyLineBeforeBlock != 0 {
			if p.uliPrefix(current) != 0 ||
				p.oliPrefix(current) != 0 ||
				p.quotePrefix(current) != 0 ||
				p.codePrefix(current) != 0 {
				p.renderParagraph(data[:i])
				return i
			}
		}

		// otherwise, scan to the beginning of the next line
		nl := bytes.IndexByte(data[i:], '\n')
		if nl >= 0 {
			i += nl + 1
		} else {
			i += len(data[i:])
		}
	}

	p.renderParagraph(data[:i])
	return i
}
+
// skipChar returns the index of the first byte at or after i that is not
// equal to c (or len(data) if data runs out of such bytes).
func skipChar(data []byte, i int, c byte) int {
	for ; i < len(data) && data[i] == c; i++ {
	}
	return i
}
+
// skipCharN behaves like skipChar but stops after at most max occurrences
// of c have been skipped.
func skipCharN(data []byte, i int, c byte, max int) int {
	for max > 0 && i < len(data) && data[i] == c {
		max--
		i++
	}
	return i
}
+
// skipUntilChar returns the index of the first occurrence of c at or after
// i, or len(data) when c does not occur.
func skipUntilChar(data []byte, i int, c byte) int {
	for ; i < len(data) && data[i] != c; i++ {
	}
	return i
}
+
+func skipAlnum(data []byte, i int) int {
+	n := len(data)
+	for i < n && isAlnum(data[i]) {
+		i++
+	}
+	return i
+}
+
+func skipSpace(data []byte, i int) int {
+	n := len(data)
+	for i < n && isSpace(data[i]) {
+		i++
+	}
+	return i
+}
+
// backChar moves i backwards over the run of bytes equal to c that ends at
// data[i-1], returning the index just before that run.
func backChar(data []byte, i int, c byte) int {
	for ; i > 0 && data[i-1] == c; i-- {
	}
	return i
}
+
// backUntilChar moves i backwards until data[i-1] == c (or i reaches 0) and
// returns the resulting index.
func backUntilChar(data []byte, i int, c byte) int {
	for ; i > 0 && data[i-1] != c; i-- {
	}
	return i
}
diff --git a/vendor/github.com/gomarkdown/markdown/parser/callout.go b/vendor/github.com/gomarkdown/markdown/parser/callout.go
new file mode 100644
index 00000000..15858aa9
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/parser/callout.go
@@ -0,0 +1,29 @@
+package parser
+
+import (
+	"bytes"
+	"strconv"
+)
+
// IsCallout detects a callout of the form <<N>> where N is an integer > 0.
// It returns the whitespace-trimmed digits between the markers and the
// number of bytes consumed, or (nil, 0) when data does not start a callout.
func IsCallout(data []byte) (id []byte, consumed int) {
	if !bytes.HasPrefix(data, []byte("<<")) {
		return nil, 0
	}
	rest := data[2:]
	stop := bytes.Index(rest, []byte(">>"))
	if stop < 0 {
		return nil, 0
	}

	digits := bytes.TrimSpace(rest[:stop])
	num, err := strconv.Atoi(string(digits))
	if err != nil || num <= 0 {
		// not a number, or not a positive one
		return nil, 0
	}
	return digits, 2 + stop + 2 // "<<" + digits + ">>"
}
diff --git a/vendor/github.com/gomarkdown/markdown/parser/caption.go b/vendor/github.com/gomarkdown/markdown/parser/caption.go
new file mode 100644
index 00000000..54d3f741
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/parser/caption.go
@@ -0,0 +1,70 @@
+package parser
+
+import (
+	"bytes"
+)
+
// caption checks whether data starts with the given caption marker (e.g.
// "Figure: " or "Quote: "). On a match it returns the caption text up to
// the first empty line, a potential heading ID parsed from a trailing
// {#id}, and the number of bytes consumed; otherwise (nil, "", 0).
func (p *Parser) caption(data, caption []byte) ([]byte, string, int) {
	if !bytes.HasPrefix(data, caption) {
		return nil, "", 0
	}
	j := len(caption)
	data = data[j:]
	// the caption text runs until the first empty line
	end := p.linesUntilEmpty(data)

	data = data[:end]

	id, start := captionID(data)
	if id != "" {
		// strip the {#id} suffix from the returned caption text
		return data[:start], id, end + j
	}

	return data, "", end + j
}
+
// linesUntilEmpty scans whole lines up to (and including) the first empty
// line and returns the number of bytes scanned.
func (p *Parser) linesUntilEmpty(data []byte) int {
	line, i := 0, 0

	for line < len(data) {
		i++

		// find the end of this line
		for i < len(data) && data[i-1] != '\n' {
			i++
		}

		// keep consuming lines while they are non-empty
		if p.isEmpty(data[line:i]) == 0 {
			line = i
			continue
		}

		break
	}
	return i
}
+
+// captionID checks if the caption *ends* in {#....}. If so the text after {# is taken to be
+// the ID/anchor of the entire figure block.
+func captionID(data []byte) (string, int) {
+	end := len(data)
+
+	j, k := 0, 0
+	// find start/end of heading id
+	for j = 0; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ {
+	}
+	for k = j + 1; k < end && data[k] != '}'; k++ {
+	}
+	// remains must be whitespace.
+	for l := k + 1; l < end; l++ {
+		if !isSpace(data[l]) {
+			return "", 0
+		}
+	}
+
+	if j > 0 && k > 0 && j+2 < k {
+		return string(data[j+2 : k]), j
+	}
+	return "", 0
+}
diff --git a/vendor/github.com/gomarkdown/markdown/parser/citation.go b/vendor/github.com/gomarkdown/markdown/parser/citation.go
new file mode 100644
index 00000000..8ea1fbee
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/parser/citation.go
@@ -0,0 +1,86 @@
+package parser
+
+import (
+	"bytes"
+
+	"github.com/gomarkdown/markdown/ast"
+)
+
// citation parses a citation. In its most simple form [@ref], we allow multiple
// being separated by semicolons and a sub reference inside ala pandoc: [@ref, p. 23].
// Each citation can have a modifier: !, ? or - which mean:
//
// ! - normative
// ? - informative
// - - suppressed
//
// The suffix starts after a comma, we strip any whitespace before and after. If the output
// allows for it, this can be rendered.
func citation(p *Parser, data []byte, offset int) (int, ast.Node) {
	// look for the matching closing bracket
	i := offset + 1
	for level := 1; level > 0 && i < len(data); i++ {
		switch {
		case data[i] == '\n':
			// no newlines allowed.
			return 0, nil

		case data[i-1] == '\\':
			// skip escaped characters
			continue

		case data[i] == '[':
			level++

		case data[i] == ']':
			level--
			if level <= 0 {
				i-- // compensate for extra i++ in for loop
			}
		}
	}

	if i >= len(data) {
		return 0, nil
	}

	node := &ast.Citation{}

	// NOTE(review): entries are taken from data[1:i], which assumes the
	// opening '[' sits at index 0 (i.e. offset is effectively 0 or data was
	// pre-sliced by the caller) — confirm against the inline dispatcher.
	citations := bytes.Split(data[1:i], []byte(";"))
	for _, citation := range citations {
		var suffix []byte
		citation = bytes.TrimSpace(citation)
		// NOTE(review): an empty entry (e.g. "[@a;]") would panic on the
		// index below — confirm inputs are pre-validated upstream.
		j := 0
		if citation[j] != '@' {
			// not a citation, drop out entirely.
			return 0, nil
		}
		// split off an optional ", suffix" part
		if c := bytes.Index(citation, []byte(",")); c > 0 {
			part := citation[:c]
			suff := citation[c+1:]
			part = bytes.TrimSpace(part)
			suff = bytes.TrimSpace(suff)

			citation = part
			suffix = suff
		}

		// the byte after the '@' may be a type modifier
		citeType := ast.CitationTypeInformative
		j = 1
		switch citation[j] {
		case '!':
			citeType = ast.CitationTypeNormative
			j++
		case '?':
			citeType = ast.CitationTypeInformative
			j++
		case '-':
			citeType = ast.CitationTypeSuppressed
			j++
		}
		node.Destination = append(node.Destination, citation[j:])
		node.Type = append(node.Type, citeType)
		node.Suffix = append(node.Suffix, suffix)
	}

	return i + 1, node
}
diff --git a/vendor/github.com/gomarkdown/markdown/parser/esc.go b/vendor/github.com/gomarkdown/markdown/parser/esc.go
new file mode 100644
index 00000000..0a79aa35
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/parser/esc.go
@@ -0,0 +1,20 @@
+package parser
+
// isEscape reports whether the byte at index i is escaped, i.e. preceded by
// an odd number of backslashes.
func isEscape(data []byte, i int) bool {
	// count the backslashes immediately preceding data[i]
	count := 0
	for i-count-1 >= 0 && data[i-count-1] == '\\' {
		count++
	}
	// an odd-length run of backslashes means the byte is escaped
	return count%2 != 0
}
diff --git a/vendor/github.com/gomarkdown/markdown/parser/figures.go b/vendor/github.com/gomarkdown/markdown/parser/figures.go
new file mode 100644
index 00000000..6615449c
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/parser/figures.go
@@ -0,0 +1,119 @@
+package parser
+
+import (
+	"bytes"
+
+	"github.com/gomarkdown/markdown/ast"
+)
+
// sFigureLine checks whether data begins with a figure fence line: up to
// three leading spaces, an '!' followed by at least three dashes, then only
// trailing spaces until the end of the line. It returns the index just past
// the line (including its newline, when present) and the dash marker, or
// (0, "") when there is no fence. A non-empty oldmarker must be matched
// exactly, which is how closing fences are paired with opening ones.
func sFigureLine(data []byte, oldmarker string) (end int, marker string) {
	n := len(data)

	// up to three leading spaces are allowed
	pos := 0
	for pos < n && pos < 3 && data[pos] == ' ' {
		pos++
	}

	// the fence must start with "!-"
	if pos+1 >= n || data[pos] != '!' || data[pos+1] != '-' {
		return 0, ""
	}
	pos++

	// measure the run of dashes
	runStart := pos
	for pos < n && data[pos] == '-' {
		pos++
	}
	if pos-runStart < 3 {
		// the marker char must occur at least 3 times
		return 0, ""
	}
	marker = string(data[runStart:pos])

	// if this is the end marker, it must match the beginning marker
	if oldmarker != "" && marker != oldmarker {
		return 0, ""
	}

	// there is no syntax modifier although it might be an idea to re-use this space for something?

	// only trailing spaces may follow on this line
	for pos < n && data[pos] == ' ' {
		pos++
	}
	switch {
	case pos == n:
		return pos, marker
	case data[pos] == '\n':
		return pos + 1, marker // take the newline into account
	}
	return 0, ""
}
+
+// figureBlock returns the end index if data contains a figure block at the beginning,
+// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects.
+// If doRender is true, a final newline is mandatory to recognize the figure block.
+func (p *Parser) figureBlock(data []byte, doRender bool) int {
+	beg, marker := sFigureLine(data, "")
+	if beg == 0 || beg >= len(data) {
+		return 0
+	}
+
+	var raw bytes.Buffer
+
+	for {
+		// safe to assume beg < len(data)
+
+		// check for the end of the code block
+		figEnd, _ := sFigureLine(data[beg:], marker)
+		if figEnd != 0 {
+			beg += figEnd
+			break
+		}
+
+		// copy the current line
+		end := skipUntilChar(data, beg, '\n') + 1
+
+		// did we reach the end of the buffer without a closing marker?
+		if end >= len(data) {
+			return 0
+		}
+
+		// verbatim copy to the working buffer
+		if doRender {
+			raw.Write(data[beg:end])
+		}
+		beg = end
+	}
+
+	if !doRender {
+		return beg
+	}
+
+	figure := &ast.CaptionFigure{}
+	p.addBlock(figure)
+	p.block(raw.Bytes())
+
+	defer p.finalize(figure)
+
+	if captionContent, id, consumed := p.caption(data[beg:], []byte("Figure: ")); consumed > 0 {
+		caption := &ast.Caption{}
+		p.Inline(caption, captionContent)
+
+		figure.HeadingID = id
+
+		p.addChild(caption)
+
+		beg += consumed
+	}
+
+	p.finalize(figure)
+	return beg
+}
diff --git a/vendor/github.com/gomarkdown/markdown/parser/include.go b/vendor/github.com/gomarkdown/markdown/parser/include.go
new file mode 100644
index 00000000..2448a685
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/parser/include.go
@@ -0,0 +1,129 @@
+package parser
+
+import (
+	"bytes"
+	"path"
+	"path/filepath"
+)
+
// isInclude parses {{...}}[...]: the braces contain a file path and the
// optional [...] holds an address that selects which lines to include. The
// address is treated as an opaque string and handed to readInclude as-is.
// It returns the path, the address (nil if absent) and the number of bytes
// consumed, or ("", nil, 0) when data does not start with an include.
func (p *Parser) isInclude(data []byte) (filename string, address []byte, consumed int) {
	i := skipCharN(data, 0, ' ', 3) // start with up to 3 spaces
	if len(data[i:]) < 3 {
		return "", nil, 0
	}
	if data[i] != '{' || data[i+1] != '{' {
		return "", nil, 0
	}
	start := i + 2

	// find the end delimiter
	i = skipUntilChar(data, i, '}')
	if i+1 >= len(data) {
		return "", nil, 0
	}
	end := i
	i++
	// the closing brace must be doubled as well
	if data[i] != '}' {
		return "", nil, 0
	}
	filename = string(data[start:end])

	if i+1 < len(data) && data[i+1] == '[' { // potential address specification
		start := i + 2

		end = skipUntilChar(data, start, ']')
		if end >= len(data) {
			return "", nil, 0
		}
		address = data[start:end]
		return filename, address, end + 1
	}

	return filename, address, i + 1
}
+
+func (p *Parser) readInclude(from, file string, address []byte) []byte {
+	if p.Opts.ReadIncludeFn != nil {
+		return p.Opts.ReadIncludeFn(from, file, address)
+	}
+
+	return nil
+}
+
+// isCodeInclude parses <{{...}} which is similar to isInclude the returned bytes are, however wrapped in a code block.
+func (p *Parser) isCodeInclude(data []byte) (filename string, address []byte, consumed int) {
+	i := skipCharN(data, 0, ' ', 3) // start with up to 3 spaces
+	if len(data[i:]) < 3 {
+		return "", nil, 0
+	}
+	if data[i] != '<' {
+		return "", nil, 0
+	}
+	start := i
+
+	filename, address, consumed = p.isInclude(data[i+1:])
+	if consumed == 0 {
+		return "", nil, 0
+	}
+	return filename, address, start + consumed + 1
+}
+
// readCodeInclude acts like readInclude except the returned bytes are
// wrapped in a fenced code block whose info string is derived from the
// file's extension.
func (p *Parser) readCodeInclude(from, file string, address []byte) []byte {
	data := p.readInclude(from, file, address)
	if data == nil {
		return nil
	}
	ext := path.Ext(file)
	buf := &bytes.Buffer{}
	buf.Write([]byte("```"))
	if ext != "" { // starts with a dot
		buf.WriteString(" " + ext[1:] + "\n")
	} else {
		buf.WriteByte('\n')
	}
	// NOTE(review): if data does not end in a newline, the closing fence is
	// glued onto its last line — confirm includes are newline-terminated.
	buf.Write(data)
	buf.WriteString("```\n")
	return buf.Bytes()
}
+
// incStack holds the current stack of chained includes. Each value is the
// containing directory of the file being parsed.
type incStack struct {
	stack []string // directories, innermost include last
}
+
+func newIncStack() *incStack {
+	return &incStack{stack: []string{}}
+}
+
+// Push updates i with new.
+func (i *incStack) Push(new string) {
+	if path.IsAbs(new) {
+		i.stack = append(i.stack, path.Dir(new))
+		return
+	}
+	last := ""
+	if len(i.stack) > 0 {
+		last = i.stack[len(i.stack)-1]
+	}
+	i.stack = append(i.stack, path.Dir(filepath.Join(last, new)))
+}
+
+// Pop pops the last value.
+func (i *incStack) Pop() {
+	if len(i.stack) == 0 {
+		return
+	}
+	i.stack = i.stack[:len(i.stack)-1]
+}
+
+func (i *incStack) Last() string {
+	if len(i.stack) == 0 {
+		return ""
+	}
+	return i.stack[len(i.stack)-1]
+}
diff --git a/vendor/github.com/gomarkdown/markdown/parser/inline.go b/vendor/github.com/gomarkdown/markdown/parser/inline.go
new file mode 100644
index 00000000..81766b85
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/parser/inline.go
@@ -0,0 +1,1284 @@
+package parser
+
+import (
+	"bytes"
+	"regexp"
+	"strconv"
+
+	"github.com/gomarkdown/markdown/ast"
+)
+
+// Parsing of inline elements
+
+var (
+	urlRe    = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+`
+	anchorRe = regexp.MustCompile(`^(<a\shref="` + urlRe + `"(\stitle="[^"<>]+")?\s?>` + urlRe + `<\/a>)`)
+
+	// TODO: improve this regexp to catch all possible entities:
+	htmlEntityRe = regexp.MustCompile(`&[a-z]{2,5};`)
+)
+
// Inline parses text within a block, dispatching on the first byte of each
// potential inline construct via p.inlineCallback and appending the resulting
// nodes (interleaved with plain-text runs) to currBlock.
// Each handler returns the number of consumed chars.
func (p *Parser) Inline(currBlock ast.Node, data []byte) {
	// handlers might call us recursively: enforce a maximum depth
	if p.nesting >= p.maxNesting || len(data) == 0 {
		return
	}
	p.nesting++
	// beg marks the start of the pending plain-text run; end is the scan cursor
	beg, end := 0, 0

	n := len(data)
	for end < n {
		handler := p.inlineCallback[data[end]]
		if handler == nil {
			end++
			continue
		}
		consumed, node := handler(p, data, end)
		if consumed == 0 {
			// no action from the callback
			end++
			continue
		}
		// copy inactive chars into the output
		ast.AppendChild(currBlock, newTextNode(data[beg:end]))
		if node != nil {
			ast.AppendChild(currBlock, node)
		}
		// skip past what the handler consumed; a handler may consume past
		// len(data), in which case the loop simply terminates
		beg = end + consumed
		end = beg
	}

	if beg < n {
		// drop a single trailing newline from the final text run
		if data[end-1] == '\n' {
			end--
		}
		ast.AppendChild(currBlock, newTextNode(data[beg:end]))
	}
	p.nesting--
}
+
// single and double emphasis parsing. c is the marker byte ('*', '_' or '~');
// the helper functions receive data just past the opening run and return how
// much they consumed, so the opening run length is added back here.
func emphasis(p *Parser, data []byte, offset int) (int, ast.Node) {
	data = data[offset:]
	c := data[0]

	n := len(data)
	if n > 2 && data[1] != c {
		// whitespace cannot follow an opening emphasis;
		// strikethrough only takes two characters '~~'
		if isSpace(data[1]) {
			return 0, nil
		}
		if p.extensions&SuperSubscript != 0 && c == '~' {
			// potential subscript, no spaces, except when escaped, helperEmphasis does
			// not check that for us, so walk the bytes and check.
			ret := skipUntilChar(data[1:], 0, c)
			if ret == 0 {
				return 0, nil
			}
			ret++ // we started with data[1:] above.
			for i := 1; i < ret; i++ {
				if isSpace(data[i]) && !isEscape(data, i) {
					return 0, nil
				}
			}
			sub := &ast.Subscript{}
			sub.Literal = data[1:ret]
			return ret + 1, sub
		}
		ret, node := helperEmphasis(p, data[1:], c)
		if ret == 0 {
			return 0, nil
		}

		return ret + 1, node
	}

	// double marker: strong (or strikethrough for '~~')
	if n > 3 && data[1] == c && data[2] != c {
		if isSpace(data[2]) {
			return 0, nil
		}
		ret, node := helperDoubleEmphasis(p, data[2:], c)
		if ret == 0 {
			return 0, nil
		}

		return ret + 2, node
	}

	// triple marker: strong+em; '~~~' is never emphasis
	if n > 4 && data[1] == c && data[2] == c && data[3] != c {
		if c == '~' || isSpace(data[3]) {
			return 0, nil
		}
		ret, node := helperTripleEmphasis(p, data, 3, c)
		if ret == 0 {
			return 0, nil
		}

		return ret + 3, node
	}

	return 0, nil
}
+
// codeSpan parses a `code` span at data[offset:]. The number of opening
// backticks determines the closing delimiter; shorter interior backtick runs
// are allowed. Returns bytes consumed and a Code node (nil node when the span
// contains only spaces).
func codeSpan(p *Parser, data []byte, offset int) (int, ast.Node) {
	data = data[offset:]

	// count the number of backticks in the delimiter
	nb := skipChar(data, 0, '`')

	// find the next delimiter: i counts consecutive backticks seen so far,
	// end ends up one past the final backtick of the closing run
	i, end := 0, 0
	for end = nb; end < len(data) && i < nb; end++ {
		if data[end] == '`' {
			i++
		} else {
			i = 0
		}
	}

	// no matching delimiter?
	if i < nb && end >= len(data) {
		return 0, nil
	}

	// trim outside whitespace
	fBegin := nb
	for fBegin < end && data[fBegin] == ' ' {
		fBegin++
	}

	fEnd := end - nb
	for fEnd > fBegin && data[fEnd-1] == ' ' {
		fEnd--
	}

	// render the code span
	if fBegin != fEnd {
		code := &ast.Code{}
		code.Literal = data[fBegin:fEnd]
		return end, code
	}

	// delimiters enclosed only spaces: consume them but emit nothing
	return end, nil
}
+
+// newline preceded by two spaces becomes <br>
+func maybeLineBreak(p *Parser, data []byte, offset int) (int, ast.Node) {
+	origOffset := offset
+	offset = skipChar(data, offset, ' ')
+
+	if offset < len(data) && data[offset] == '\n' {
+		if offset-origOffset >= 2 {
+			return offset - origOffset + 1, &ast.Hardbreak{}
+		}
+		return offset - origOffset, nil
+	}
+	return 0, nil
+}
+
+// newline without two spaces works when HardLineBreak is enabled
+func lineBreak(p *Parser, data []byte, offset int) (int, ast.Node) {
+	if p.extensions&HardLineBreak != 0 {
+		return 1, &ast.Hardbreak{}
+	}
+	return 0, nil
+}
+
// linkType discriminates the constructs that begin with '['.
type linkType int

const (
	linkNormal           linkType = iota // [text](url) or reference link
	linkImg                              // ![alt](url)
	linkDeferredFootnote                 // [^ref] with the definition elsewhere
	linkInlineFootnote                   // ^[text]
	linkCitation                         // [@citation] (mmark extension)
)

// isReferenceStyleLink reports whether the bracket at pos opens the id part
// of a reference-style link ("[text][id]"). Footnote references never do.
func isReferenceStyleLink(data []byte, pos int, t linkType) bool {
	if t == linkDeferredFootnote {
		return false
	}
	return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^'
}
+
+func maybeImage(p *Parser, data []byte, offset int) (int, ast.Node) {
+	if offset < len(data)-1 && data[offset+1] == '[' {
+		return link(p, data, offset)
+	}
+	return 0, nil
+}
+
// maybeInlineFootnoteOrSuper handles '^': either the start of an inline
// footnote "^[text]" (delegated to link) or, with the SuperSubscript
// extension, a superscript "^text^".
func maybeInlineFootnoteOrSuper(p *Parser, data []byte, offset int) (int, ast.Node) {
	if offset < len(data)-1 && data[offset+1] == '[' {
		return link(p, data, offset) // ^[text] — inline footnote
	}

	if p.extensions&SuperSubscript != 0 {
		// find the closing '^'; ret is an index into data[offset:]
		ret := skipUntilChar(data[offset:], 1, '^')
		if ret == 0 {
			return 0, nil
		}
		// superscripts may not contain unescaped spaces
		for i := offset; i < offset+ret; i++ {
			if isSpace(data[i]) && !isEscape(data, i) {
				return 0, nil
			}
		}
		sup := &ast.Superscript{}
		sup.Literal = data[offset+1 : offset+ret]
		// NOTE(review): the other handlers return lengths relative to the
		// current position, but this returns offset+ret — looks like offset is
		// double-counted by the caller; confirm against Parser.Inline.
		return offset + ret, sup
	}

	return 0, nil
}
+
// '[': parse a link or an image or a footnote or a citation.
// link is the most involved inline handler: it classifies the construct,
// finds the matching closing bracket for the text part, then parses one of
// three tail forms — inline "(url 'title')", reference "[id]", or a shortcut
// reference / footnote with no tail — and finally builds the AST node.
func link(p *Parser, data []byte, offset int) (int, ast.Node) {
	// no links allowed inside regular links, footnote, and deferred footnotes
	if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') {
		return 0, nil
	}

	var t linkType
	switch {
	// special case: ![^text] == deferred footnote (that follows something with
	// an exclamation point)
	case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^':
		t = linkDeferredFootnote
	// ![alt] == image
	case offset >= 0 && data[offset] == '!':
		t = linkImg
		offset++
	// [@citation], [@-citation], [@?citation], [@!citation]
	case p.extensions&Mmark != 0 && len(data)-1 > offset && data[offset+1] == '@':
		t = linkCitation
	// [text] == regular link
	// ^[text] == inline footnote
	// [^refId] == deferred footnote
	case p.extensions&Footnotes != 0:
		if offset >= 0 && data[offset] == '^' {
			t = linkInlineFootnote
			offset++
		} else if len(data)-1 > offset && data[offset+1] == '^' {
			t = linkDeferredFootnote
		}
	default:
		t = linkNormal
	}

	// from here on all indices are relative to the (possibly advanced) offset
	data = data[offset:]

	if t == linkCitation {
		return citation(p, data, 0)
	}

	var (
		i                               = 1
		noteID                          int
		title, link, linkID, altContent []byte
		textHasNl                       = false
	)

	if t == linkDeferredFootnote {
		i++
	}

	// look for the matching closing bracket
	for level := 1; level > 0 && i < len(data); i++ {
		switch {
		case data[i] == '\n':
			textHasNl = true

		case data[i-1] == '\\':
			continue

		case data[i] == '[':
			level++

		case data[i] == ']':
			level--
			if level <= 0 {
				i-- // compensate for extra i++ in for loop
			}
		}
	}

	if i >= len(data) {
		return 0, nil
	}

	// txtE is the index of the closing ']' of the text part
	txtE := i
	i++
	var footnoteNode ast.Node

	// skip any amount of whitespace or newline
	// (this is much more lax than original markdown syntax)
	i = skipSpace(data, i)

	// inline style link
	switch {
	case i < len(data) && data[i] == '(':
		// skip initial whitespace
		i++

		i = skipSpace(data, i)

		linkB := i

		// look for link end: ' " )
	findlinkend:
		for i < len(data) {
			switch {
			case data[i] == '\\':
				i += 2

			case data[i] == ')' || data[i] == '\'' || data[i] == '"':
				break findlinkend

			default:
				i++
			}
		}

		if i >= len(data) {
			return 0, nil
		}
		linkE := i

		// look for title end if present
		titleB, titleE := 0, 0
		if data[i] == '\'' || data[i] == '"' {
			i++
			titleB = i

		findtitleend:
			for i < len(data) {
				switch {
				case data[i] == '\\':
					i += 2

				case data[i] == ')':
					break findtitleend

				default:
					i++
				}
			}

			if i >= len(data) {
				return 0, nil
			}

			// skip whitespace after title
			titleE = i - 1
			for titleE > titleB && isSpace(data[titleE]) {
				titleE--
			}

			// check for closing quote presence
			if data[titleE] != '\'' && data[titleE] != '"' {
				titleB, titleE = 0, 0
				linkE = i
			}
		}

		// remove whitespace at the end of the link
		for linkE > linkB && isSpace(data[linkE-1]) {
			linkE--
		}

		// remove optional angle brackets around the link
		if data[linkB] == '<' {
			linkB++
		}
		if data[linkE-1] == '>' {
			linkE--
		}

		// build escaped link and title
		if linkE > linkB {
			link = data[linkB:linkE]
		}

		if titleE > titleB {
			title = data[titleB:titleE]
		}

		i++

	// reference style link
	case isReferenceStyleLink(data, i, t):
		var id []byte
		altContentConsidered := false

		// look for the id
		i++
		linkB := i
		i = skipUntilChar(data, i, ']')

		if i >= len(data) {
			return 0, nil
		}
		linkE := i

		// find the reference
		if linkB == linkE {
			// empty "[]" id: the link text doubles as the id;
			// collapse newlines in it first
			if textHasNl {
				var b bytes.Buffer

				for j := 1; j < txtE; j++ {
					switch {
					case data[j] != '\n':
						b.WriteByte(data[j])
					case data[j-1] != ' ':
						b.WriteByte(' ')
					}
				}

				id = b.Bytes()
			} else {
				id = data[1:txtE]
				altContentConsidered = true
			}
		} else {
			id = data[linkB:linkE]
		}

		// find the reference with matching id
		lr, ok := p.getRef(string(id))
		if !ok {
			return 0, nil
		}

		// keep link and title from reference
		linkID = id
		link = lr.link
		title = lr.title
		if altContentConsidered {
			altContent = lr.text
		}
		i++

	// shortcut reference style link or reference or inline footnote
	default:
		var id []byte

		// craft the id
		if textHasNl {
			var b bytes.Buffer

			for j := 1; j < txtE; j++ {
				switch {
				case data[j] != '\n':
					b.WriteByte(data[j])
				case data[j-1] != ' ':
					b.WriteByte(' ')
				}
			}

			id = b.Bytes()
		} else {
			if t == linkDeferredFootnote {
				id = data[2:txtE] // get rid of the ^
			} else {
				id = data[1:txtE]
			}
		}

		footnoteNode = &ast.ListItem{}
		if t == linkInlineFootnote {
			// create a new reference
			noteID = len(p.notes) + 1

			// fragment becomes the link target; capped at 16 bytes
			var fragment []byte
			if len(id) > 0 {
				if len(id) < 16 {
					fragment = make([]byte, len(id))
				} else {
					fragment = make([]byte, 16)
				}
				copy(fragment, slugify(id))
			} else {
				fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...)
			}

			ref := &reference{
				noteID:   noteID,
				hasBlock: false,
				link:     fragment,
				title:    id,
				footnote: footnoteNode,
			}

			p.notes = append(p.notes, ref)
			p.refsRecord[string(ref.link)] = struct{}{}

			link = ref.link
			title = ref.title
		} else {
			// find the reference with matching id
			lr, ok := p.getRef(string(id))
			if !ok {
				return 0, nil
			}

			if t == linkDeferredFootnote && !p.isFootnote(lr) {
				lr.noteID = len(p.notes) + 1
				lr.footnote = footnoteNode
				p.notes = append(p.notes, lr)
				p.refsRecord[string(lr.link)] = struct{}{}
			}

			// keep link and title from reference
			link = lr.link
			// if inline footnote, title == footnote contents
			title = lr.title
			noteID = lr.noteID
		}

		// rewind the whitespace
		i = txtE + 1
	}

	var uLink []byte
	if t == linkNormal || t == linkImg {
		if len(link) > 0 {
			var uLinkBuf bytes.Buffer
			unescapeText(&uLinkBuf, link)
			uLink = uLinkBuf.Bytes()
		}

		// links need something to click on and somewhere to go
		if len(uLink) == 0 || (t == linkNormal && txtE <= 1) {
			return 0, nil
		}
	}

	// call the relevant rendering function
	switch t {
	case linkNormal:
		link := &ast.Link{
			Destination: normalizeURI(uLink),
			Title:       title,
			DeferredID:  linkID,
		}
		if len(altContent) > 0 {
			ast.AppendChild(link, newTextNode(altContent))
		} else {
			// links cannot contain other links, so turn off link parsing
			// temporarily and recurse
			insideLink := p.insideLink
			p.insideLink = true
			p.Inline(link, data[1:txtE])
			p.insideLink = insideLink
		}
		return i, link

	case linkImg:
		image := &ast.Image{
			Destination: uLink,
			Title:       title,
		}
		ast.AppendChild(image, newTextNode(data[1:txtE]))
		return i + 1, image

	case linkInlineFootnote, linkDeferredFootnote:
		link := &ast.Link{
			Destination: link,
			Title:       title,
			NoteID:      noteID,
			Footnote:    footnoteNode,
		}
		if t == linkDeferredFootnote {
			link.DeferredID = data[2:txtE]
		}
		if t == linkInlineFootnote {
			i++
		}
		return i, link

	default:
		return 0, nil
	}
}
+
+func (p *Parser) inlineHTMLComment(data []byte) int {
+	if len(data) < 5 {
+		return 0
+	}
+	if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' {
+		return 0
+	}
+	i := 5
+	// scan for an end-of-comment marker, across lines if necessary
+	for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') {
+		i++
+	}
+	// no end-of-comment marker
+	if i >= len(data) {
+		return 0
+	}
+	return i + 1
+}
+
// stripMailto removes a leading "mailto:" or "mailto://" scheme from link,
// returning link unchanged when neither prefix is present.
func stripMailto(link []byte) []byte {
	switch {
	case bytes.HasPrefix(link, []byte("mailto://")):
		return link[len("mailto://"):]
	case bytes.HasPrefix(link, []byte("mailto:")):
		return link[len("mailto:"):]
	}
	return link
}
+
// autolinkType specifies a kind of autolink that gets detected.
type autolinkType int

// These are the possible flag values for the autolink renderer.
const (
	notAutolink    autolinkType = iota // not an autolink at all (plain tag)
	normalAutolink                     // <scheme:...> URL autolink
	emailAutolink                      // <user@host> e-mail autolink
)
+
// '<' when tags or autolinks are allowed: mmark callouts, HTML comments,
// raw HTML spans, and <...> autolinks (URL or e-mail).
func leftAngle(p *Parser, data []byte, offset int) (int, ast.Node) {
	data = data[offset:]

	if p.extensions&Mmark != 0 {
		id, consumed := IsCallout(data)
		if consumed > 0 {
			node := &ast.Callout{}
			node.ID = id
			return consumed, node
		}
	}

	altype, end := tagLength(data)
	// an HTML comment overrides whatever span tagLength found
	if size := p.inlineHTMLComment(data); size > 0 {
		end = size
	}
	if end <= 2 {
		return end, nil
	}
	if altype == notAutolink {
		htmlTag := &ast.HTMLSpan{}
		htmlTag.Literal = data[:end]
		return end, htmlTag
	}

	// autolink: strip the surrounding '<' and '>' and unescape the middle
	var uLink bytes.Buffer
	unescapeText(&uLink, data[1:end+1-2])
	if uLink.Len() <= 0 {
		return end, nil
	}
	link := uLink.Bytes()
	node := &ast.Link{
		Destination: link,
	}
	if altype == emailAutolink {
		node.Destination = append([]byte("mailto:"), link...)
	}
	ast.AppendChild(node, newTextNode(stripMailto(link)))
	return end, node
}
+
// '\\' backslash escape
var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~")

// escape handles a backslash at data[offset]: "\ " becomes a non-blocking
// space (with the NonBlockingSpace extension), "\<newline>" a hard break
// (with BackslashLineBreak), and "\x" for x in escapeChars emits x literally.
// Any other backslash is left untouched.
func escape(p *Parser, data []byte, offset int) (int, ast.Node) {
	data = data[offset:]

	// lone trailing backslash: swallow it (consumed count of 2 may run one
	// past the end; the caller's loop bound tolerates this)
	if len(data) <= 1 {
		return 2, nil
	}

	if p.extensions&NonBlockingSpace != 0 && data[1] == ' ' {
		return 2, &ast.NonBlockingSpace{}
	}

	if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' {
		return 2, &ast.Hardbreak{}
	}

	if bytes.IndexByte(escapeChars, data[1]) < 0 {
		return 0, nil
	}

	// drop the backslash, keep the escaped character
	return 2, newTextNode(data[1:2])
}
+
+func unescapeText(ob *bytes.Buffer, src []byte) {
+	i := 0
+	for i < len(src) {
+		org := i
+		for i < len(src) && src[i] != '\\' {
+			i++
+		}
+
+		if i > org {
+			ob.Write(src[org:i])
+		}
+
+		if i+1 >= len(src) {
+			break
+		}
+
+		ob.WriteByte(src[i+1])
+		i += 2
+	}
+}
+
// '&' escaped when it doesn't belong to an entity
// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+;
func entity(p *Parser, data []byte, offset int) (int, ast.Node) {
	data = data[offset:]

	// optional '#' for numeric entities, then the alphanumeric body
	end := skipCharN(data, 1, '#', 1)
	end = skipAlnum(data, end)

	if end < len(data) && data[end] == ';' {
		end++ // real entity
	} else {
		return 0, nil // lone '&'
	}

	ent := data[:end]
	// undo &amp; escaping or it will be converted to &amp;amp; by another
	// escaper in the renderer
	if bytes.Equal(ent, []byte("&amp;")) {
		ent = []byte{'&'}
	}

	return end, newTextNode(ent)
}
+
+func linkEndsWithEntity(data []byte, linkEnd int) bool {
+	entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1)
+	return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd
+}
+
// hasPrefixCaseInsensitive is a custom implementation of
//     strings.HasPrefix(strings.ToLower(s), prefix)
// we rolled our own because ToLower pulls in a huge machinery of lowercasing
// anything from Unicode and that's very slow. Since this func will only be
// used on ASCII protocol prefixes (already lowercase), we can take shortcuts.
func hasPrefixCaseInsensitive(s, prefix []byte) bool {
	if len(prefix) > len(s) {
		return false
	}
	const delta = byte('a' - 'A')
	for i := range prefix {
		got := s[i]
		if prefix[i] != got && prefix[i] != got+delta {
			return false
		}
	}
	return true
}
+
+var protocolPrefixes = [][]byte{
+	[]byte("http://"),
+	[]byte("https://"),
+	[]byte("ftp://"),
+	[]byte("file://"),
+	[]byte("mailto:"),
+}
+
+const shortestPrefix = 6 // len("ftp://"), the shortest of the above
+
+func maybeAutoLink(p *Parser, data []byte, offset int) (int, ast.Node) {
+	// quick check to rule out most false hits
+	if p.insideLink || len(data) < offset+shortestPrefix {
+		return 0, nil
+	}
+	for _, prefix := range protocolPrefixes {
+		endOfHead := offset + 8 // 8 is the len() of the longest prefix
+		if endOfHead > len(data) {
+			endOfHead = len(data)
+		}
+		if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) {
+			return autoLink(p, data, offset)
+		}
+	}
+	return 0, nil
+}
+
// autoLink parses a bare URL starting near data[offset] (the offset points at
// the "://" probe hit; the scheme letters before it are rewound). It trims
// trailing punctuation and unbalanced closing delimiters before emitting a
// Link node.
func autoLink(p *Parser, data []byte, offset int) (int, ast.Node) {
	// Now a more expensive check to see if we're not inside an anchor element
	anchorStart := offset
	offsetFromAnchor := 0
	for anchorStart > 0 && data[anchorStart] != '<' {
		anchorStart--
		offsetFromAnchor++
	}

	anchorStr := anchorRe.Find(data[anchorStart:])
	if anchorStr != nil {
		// already wrapped in an <a href=...> — emit the remainder verbatim
		anchorClose := &ast.HTMLSpan{}
		anchorClose.Literal = anchorStr[offsetFromAnchor:]
		return len(anchorStr) - offsetFromAnchor, anchorClose
	}

	// scan backward for a word boundary
	rewind := 0
	for offset-rewind > 0 && rewind <= 7 && isLetter(data[offset-rewind-1]) {
		rewind++
	}
	if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters
		return 0, nil
	}

	origData := data
	data = data[offset-rewind:]

	if !isSafeLink(data) {
		return 0, nil
	}

	linkEnd := 0
	for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) {
		linkEnd++
	}

	// Skip punctuation at the end of the link
	if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' {
		linkEnd--
	}

	// But don't skip semicolon if it's a part of escaped entity:
	if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) {
		linkEnd--
	}

	// See if the link finishes with a punctuation sign that can be closed.
	var copen byte
	switch data[linkEnd-1] {
	case '"':
		copen = '"'
	case '\'':
		copen = '\''
	case ')':
		copen = '('
	case ']':
		copen = '['
	case '}':
		copen = '{'
	default:
		copen = 0
	}

	if copen != 0 {
		// bufEnd indexes origData just before the closing delimiter
		bufEnd := offset - rewind + linkEnd - 2

		openDelim := 1

		/* Try to close the final punctuation sign in this same line;
		 * if we managed to close it outside of the URL, that means that it's
		 * not part of the URL. If it closes inside the URL, that means it
		 * is part of the URL.
		 *
		 * Examples:
		 *
		 *      foo http://www.pokemon.com/Pikachu_(Electric) bar
		 *              => http://www.pokemon.com/Pikachu_(Electric)
		 *
		 *      foo (http://www.pokemon.com/Pikachu_(Electric)) bar
		 *              => http://www.pokemon.com/Pikachu_(Electric)
		 *
		 *      foo http://www.pokemon.com/Pikachu_(Electric)) bar
		 *              => http://www.pokemon.com/Pikachu_(Electric))
		 *
		 *      (foo http://www.pokemon.com/Pikachu_(Electric)) bar
		 *              => foo http://www.pokemon.com/Pikachu_(Electric)
		 */

		for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 {
			if origData[bufEnd] == data[linkEnd-1] {
				openDelim++
			}

			if origData[bufEnd] == copen {
				openDelim--
			}

			bufEnd--
		}

		if openDelim == 0 {
			linkEnd--
		}
	}

	var uLink bytes.Buffer
	unescapeText(&uLink, data[:linkEnd])

	if uLink.Len() > 0 {
		node := &ast.Link{
			Destination: uLink.Bytes(),
		}
		ast.AppendChild(node, newTextNode(uLink.Bytes()))
		return linkEnd, node
	}

	return linkEnd, nil
}
+
+func isEndOfLink(char byte) bool {
+	return isSpace(char) || char == '<'
+}
+
+var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")}
+var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")}
+
+func isSafeLink(link []byte) bool {
+	nLink := len(link)
+	for _, path := range validPaths {
+		nPath := len(path)
+		linkPrefix := link[:nPath]
+		if nLink >= nPath && bytes.Equal(linkPrefix, path) {
+			if nLink == nPath {
+				return true
+			} else if isAlnum(link[nPath]) {
+				return true
+			}
+		}
+	}
+
+	for _, prefix := range validUris {
+		// TODO: handle unicode here
+		// case-insensitive prefix test
+		nPrefix := len(prefix)
+		if nLink > nPrefix {
+			linkPrefix := bytes.ToLower(link[:nPrefix])
+			if bytes.Equal(linkPrefix, prefix) && isAlnum(link[nPrefix]) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// return the length of the given tag, or 0 is it's not valid
+func tagLength(data []byte) (autolink autolinkType, end int) {
+	var i, j int
+
+	// a valid tag can't be shorter than 3 chars
+	if len(data) < 3 {
+		return notAutolink, 0
+	}
+
+	// begins with a '<' optionally followed by '/', followed by letter or number
+	if data[0] != '<' {
+		return notAutolink, 0
+	}
+	if data[1] == '/' {
+		i = 2
+	} else {
+		i = 1
+	}
+
+	if !isAlnum(data[i]) {
+		return notAutolink, 0
+	}
+
+	// scheme test
+	autolink = notAutolink
+
+	// try to find the beginning of an URI
+	for i < len(data) && (isAlnum(data[i]) || data[i] == '.' || data[i] == '+' || data[i] == '-') {
+		i++
+	}
+
+	if i > 1 && i < len(data) && data[i] == '@' {
+		if j = isMailtoAutoLink(data[i:]); j != 0 {
+			return emailAutolink, i + j
+		}
+	}
+
+	if i > 2 && i < len(data) && data[i] == ':' {
+		autolink = normalAutolink
+		i++
+	}
+
+	// complete autolink test: no whitespace or ' or "
+	switch {
+	case i >= len(data):
+		autolink = notAutolink
+	case autolink != notAutolink:
+		j = i
+
+		for i < len(data) {
+			if data[i] == '\\' {
+				i += 2
+			} else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isSpace(data[i]) {
+				break
+			} else {
+				i++
+			}
+
+		}
+
+		if i >= len(data) {
+			return autolink, 0
+		}
+		if i > j && data[i] == '>' {
+			return autolink, i + 1
+		}
+
+		// one of the forbidden chars has been found
+		autolink = notAutolink
+	}
+	i += bytes.IndexByte(data[i:], '>')
+	if i < 0 {
+		return autolink, 0
+	}
+	return autolink, i + 1
+}
+
+// look for the address part of a mail autolink and '>'
+// this is less strict than the original markdown e-mail address matching
+func isMailtoAutoLink(data []byte) int {
+	nb := 0
+
+	// address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@'
+	for i, c := range data {
+		if isAlnum(c) {
+			continue
+		}
+
+		switch c {
+		case '@':
+			nb++
+
+		case '-', '.', '_':
+			break
+
+		case '>':
+			if nb == 1 {
+				return i + 1
+			}
+			return 0
+		default:
+			return 0
+		}
+	}
+
+	return 0
+}
+
// look for the next emph char, skipping other constructs (code spans and
// links). Returns the index of the next unescaped c, or 0 when none is found;
// when the scan dies inside a skipped construct, the first c seen inside it
// (tmpI) is returned as a fallback.
func helperFindEmphChar(data []byte, c byte) int {
	i := 0

	for i < len(data) {
		for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' {
			i++
		}
		if i >= len(data) {
			return 0
		}
		// do not count escaped chars
		if i != 0 && data[i-1] == '\\' {
			i++
			continue
		}
		if data[i] == c {
			return i
		}

		if data[i] == '`' {
			// skip a code span
			tmpI := 0
			i++
			for i < len(data) && data[i] != '`' {
				if tmpI == 0 && data[i] == c {
					tmpI = i
				}
				i++
			}
			if i >= len(data) {
				return tmpI
			}
			i++
		} else if data[i] == '[' {
			// skip a link
			tmpI := 0
			i++
			for i < len(data) && data[i] != ']' {
				if tmpI == 0 && data[i] == c {
					tmpI = i
				}
				i++
			}
			i++
			// whitespace may separate "[text]" from "(url)" or "[id]"
			for i < len(data) && (data[i] == ' ' || data[i] == '\n') {
				i++
			}
			if i >= len(data) {
				return tmpI
			}
			if data[i] != '[' && data[i] != '(' { // not a link
				if tmpI > 0 {
					return tmpI
				}
				continue
			}
			// skip the "(...)" or "[...]" tail
			cc := data[i]
			i++
			for i < len(data) && data[i] != cc {
				if tmpI == 0 && data[i] == c {
					return i
				}
				i++
			}
			if i >= len(data) {
				return tmpI
			}
			i++
		}
	}
	return 0
}
+
// helperEmphasis parses single emphasis: data starts just past the opening
// marker c and the return value counts the content plus the closing marker.
func helperEmphasis(p *Parser, data []byte, c byte) (int, ast.Node) {
	i := 0

	// skip one symbol if coming from emph3
	if len(data) > 1 && data[0] == c && data[1] == c {
		i = 1
	}

	for i < len(data) {
		length := helperFindEmphChar(data[i:], c)
		if length == 0 {
			return 0, nil
		}
		i += length
		if i >= len(data) {
			return 0, nil
		}

		// a doubled marker is not our closer; keep scanning
		if i+1 < len(data) && data[i+1] == c {
			i++
			continue
		}

		if data[i] == c && !isSpace(data[i-1]) {

			if p.extensions&NoIntraEmphasis != 0 {
				// closing marker must sit on a word boundary
				if !(i+1 == len(data) || isSpace(data[i+1]) || isPunctuation(data[i+1])) {
					continue
				}
			}

			emph := &ast.Emph{}
			p.Inline(emph, data[:i])
			return i + 1, emph
		}
	}

	return 0, nil
}
+
// helperDoubleEmphasis parses double emphasis (strong, or strikethrough when
// c is '~'): data starts just past the opening pair and the return value
// counts the content plus both closing markers.
func helperDoubleEmphasis(p *Parser, data []byte, c byte) (int, ast.Node) {
	i := 0

	for i < len(data) {
		length := helperFindEmphChar(data[i:], c)
		if length == 0 {
			return 0, nil
		}
		i += length

		if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !isSpace(data[i-1]) {
			var node ast.Node = &ast.Strong{}
			if c == '~' {
				node = &ast.Del{}
			}
			p.Inline(node, data[:i])
			return i + 2, node
		}
		i++
	}
	return 0, nil
}
+
// helperTripleEmphasis parses triple emphasis starting at origData with the
// three opening markers already consumed (offset points past them). When the
// closing run turns out to be shorter it backs up into origData and delegates
// to the single/double helpers.
func helperTripleEmphasis(p *Parser, data []byte, offset int, c byte) (int, ast.Node) {
	i := 0
	origData := data
	data = data[offset:]

	for i < len(data) {
		length := helperFindEmphChar(data[i:], c)
		if length == 0 {
			return 0, nil
		}
		i += length

		// skip whitespace preceded symbols
		if data[i] != c || isSpace(data[i-1]) {
			continue
		}

		switch {
		case i+2 < len(data) && data[i+1] == c && data[i+2] == c:
			// triple symbol found
			strong := &ast.Strong{}
			em := &ast.Emph{}
			ast.AppendChild(strong, em)
			p.Inline(em, data[:i])
			return i + 3, strong
		case i+1 < len(data) && data[i+1] == c:
			// double symbol found, hand over to emph1
			length, node := helperEmphasis(p, origData[offset-2:], c)
			if length == 0 {
				return 0, nil
			}
			// subtract the two markers re-included via origData[offset-2:]
			return length - 2, node
		default:
			// single symbol found, hand over to emph2
			length, node := helperDoubleEmphasis(p, origData[offset-1:], c)
			if length == 0 {
				return 0, nil
			}
			// subtract the one marker re-included via origData[offset-1:]
			return length - 1, node
		}
	}
	return 0, nil
}
+
+// math handle inline math wrapped with '$'
+func math(p *Parser, data []byte, offset int) (int, ast.Node) {
+	data = data[offset:]
+
+	// too short, or block math
+	if len(data) <= 2 || data[1] == '$' {
+		return 0, nil
+	}
+
+	// find next '$'
+	var end int
+	for end = 1; end < len(data) && data[end] != '$'; end++ {
+	}
+
+	// $ not match
+	if end == len(data) {
+		return 0, nil
+	}
+
+	// create inline math node
+	math := &ast.Math{}
+	math.Literal = data[1:end]
+	return end + 1, math
+}
+
+func newTextNode(d []byte) *ast.Text {
+	return &ast.Text{ast.Leaf{Literal: d}}
+}
+
// normalizeURI is a placeholder: link destinations currently pass through
// unchanged.
func normalizeURI(s []byte) []byte {
	return s // TODO: implement
}
diff --git a/vendor/github.com/gomarkdown/markdown/parser/matter.go b/vendor/github.com/gomarkdown/markdown/parser/matter.go
new file mode 100644
index 00000000..92686357
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/parser/matter.go
@@ -0,0 +1,36 @@
+package parser
+
+import (
+	"bytes"
+
+	"github.com/gomarkdown/markdown/ast"
+)
+
+func (p *Parser) documentMatter(data []byte) int {
+	if data[0] != '{' {
+		return 0
+	}
+
+	consumed := 0
+	matter := ast.DocumentMatterNone
+	if bytes.HasPrefix(data, []byte("{frontmatter}")) {
+		consumed = len("{frontmatter}")
+		matter = ast.DocumentMatterFront
+	}
+	if bytes.HasPrefix(data, []byte("{mainmatter}")) {
+		consumed = len("{mainmatter}")
+		matter = ast.DocumentMatterMain
+	}
+	if bytes.HasPrefix(data, []byte("{backmatter}")) {
+		consumed = len("{backmatter}")
+		matter = ast.DocumentMatterBack
+	}
+	if consumed == 0 {
+		return 0
+	}
+	node := &ast.DocumentMatter{Matter: matter}
+	p.addBlock(node)
+	p.finalize(node)
+
+	return consumed
+}
diff --git a/vendor/github.com/gomarkdown/markdown/parser/options.go b/vendor/github.com/gomarkdown/markdown/parser/options.go
new file mode 100644
index 00000000..d3d0c088
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/parser/options.go
@@ -0,0 +1,32 @@
+package parser
+
+import (
+	"github.com/gomarkdown/markdown/ast"
+)
+
// Flags control optional behavior of parser.
type Flags int

// Options is a collection of supplementary parameters tweaking the behavior of various parts of the parser.
type Options struct {
	ParserHook    BlockFunc       // optional custom block parser, tried before the built-in ones
	ReadIncludeFn ReadIncludeFunc // resolves include directives; when nil no data is read

	Flags Flags // Flags allow customizing parser's behavior
}

// Parser renderer configuration options.
const (
	FlagsNone        Flags = 0
	SkipFootnoteList Flags = 1 << iota // Skip adding the footnote list (regardless if they are parsed); note: value is 1<<1 == 2
)

// BlockFunc allows registration of a parser function. If successful it
// returns an ast.Node, a buffer that should be parsed as a block and the number of bytes consumed.
type BlockFunc func(data []byte) (ast.Node, []byte, int)

// ReadIncludeFunc should read the file under path and returns the read bytes,
// from will be set to the name of the current file being parsed. Initially
// this will be empty. address is the optional address specifier of which lines
// of the file to return. If this function is not set no data will be read.
type ReadIncludeFunc func(from, path string, address []byte) []byte
diff --git a/vendor/github.com/gomarkdown/markdown/parser/parser.go b/vendor/github.com/gomarkdown/markdown/parser/parser.go
new file mode 100644
index 00000000..c7302dfd
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/parser/parser.go
@@ -0,0 +1,812 @@
+/*
+Package parser implements parser for markdown text that generates AST (abstract syntax tree).
+*/
+package parser
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/gomarkdown/markdown/ast"
+)
+
+// Extensions is a bitmask of enabled parser extensions.
+type Extensions int
+
+// Bit flags representing markdown parsing extensions.
+// Use | (or) to specify multiple extensions.
+const (
+	// Note: iota is 1 on the NoIntraEmphasis line (NoExtensions occupies
+	// iota 0), so the first flag is 1<<1 == 2 and bit 0 stays unused.
+	NoExtensions           Extensions = 0
+	NoIntraEmphasis        Extensions = 1 << iota // Ignore emphasis markers inside words
+	Tables                                        // Parse tables
+	FencedCode                                    // Parse fenced code blocks
+	Autolink                                      // Detect embedded URLs that are not explicitly marked
+	Strikethrough                                 // Strikethrough text using ~~test~~
+	LaxHTMLBlocks                                 // Loosen up HTML block parsing rules
+	SpaceHeadings                                 // Be strict about prefix heading rules
+	HardLineBreak                                 // Translate newlines into line breaks
+	NonBlockingSpace                              // Translate backspace spaces into line non-blocking spaces
+	TabSizeEight                                  // Expand tabs to eight spaces instead of four
+	Footnotes                                     // Pandoc-style footnotes
+	NoEmptyLineBeforeBlock                        // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block
+	HeadingIDs                                    // specify heading IDs  with {#id}
+	Titleblock                                    // Titleblock ala pandoc
+	AutoHeadingIDs                                // Create the heading ID from the text
+	BackslashLineBreak                            // Translate trailing backslashes into line breaks
+	DefinitionLists                               // Parse definition lists
+	MathJax                                       // Parse MathJax
+	OrderedListStart                              // Keep track of the first number used when starting an ordered list.
+	Attributes                                    // Block Attributes
+	SuperSubscript                                // Super- and subscript support: 2^10^, H~2~O.
+	EmptyLinesBreakList                           // 2 empty lines break out of list
+	Includes                                      // Support including other files.
+	Mmark                                         // Support Mmark syntax, see https://mmark.nl/syntax
+
+	// CommonExtensions is the default set used by New.
+	CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode |
+		Autolink | Strikethrough | SpaceHeadings | HeadingIDs |
+		BackslashLineBreak | DefinitionLists | MathJax
+)
+
+// The size of a tab stop.
+const (
+	tabSizeDefault = 4
+	tabSizeDouble  = 8
+)
+
+// inlineParser is the signature of a handler invoked for a character that
+// triggers a response when parsing inline data. It returns the number of
+// bytes consumed (0 if it did not match) and the node produced, if any.
+type inlineParser func(p *Parser, data []byte, offset int) (int, ast.Node)
+
+// ReferenceOverrideFunc is expected to be called with a reference string and
+// return either a valid Reference type that the reference string maps to or
+// nil. If overridden is false, the default reference logic will be executed.
+// See the documentation in Options for more details on use-case.
+type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool)
+
+// Parser is a type that holds extensions and the runtime state used by
+// Parse, and the renderer. You can not use it directly, construct it with New.
+type Parser struct {
+
+	// ReferenceOverride is an optional function callback that is called every
+	// time a reference is resolved. It can be set before starting parsing.
+	//
+	// In Markdown, the link reference syntax can be made to resolve a link to
+	// a reference instead of an inline URL, in one of the following ways:
+	//
+	//  * [link text][refid]
+	//  * [refid][]
+	//
+	// Usually, the refid is defined at the bottom of the Markdown document. If
+	// this override function is provided, the refid is passed to the override
+	// function first, before consulting the defined refids at the bottom. If
+	// the override function indicates an override did not occur, the refids at
+	// the bottom will be used to fill in the link details.
+	ReferenceOverride ReferenceOverrideFunc
+
+	// Opts tweaks parser behavior; see Options.
+	Opts Options
+
+	// after parsing, this is AST root of parsed markdown text
+	Doc ast.Node
+
+	// extensions is the bitmask of enabled extensions, fixed at construction.
+	extensions Extensions
+
+	// refs maps a lowercased reference id to its definition (populated by isReference).
+	refs           map[string]*reference
+	// refsRecord is the membership set consulted by isFootnote, keyed by a ref's link.
+	refsRecord     map[string]struct{}
+	// inlineCallback dispatches inline parsing on the byte that triggers each construct.
+	inlineCallback [256]inlineParser
+	// nesting/maxNesting bound recursion depth; maxNesting is set to 16 by NewWithExtensions.
+	nesting        int
+	maxNesting     int
+	// insideLink is set while parsing link contents — presumably to reject nested links; confirm in inline.go.
+	insideLink     bool
+	indexCnt       int // incremented after every index
+
+	// Footnotes need to be ordered as well as available to quickly check for
+	// presence. If a ref is also a footnote, it's stored both in refs and here
+	// in notes. Slice is nil if footnotes not enabled.
+	notes []*reference
+
+	tip                  ast.Node // = doc
+	oldTip               ast.Node
+	lastMatchedContainer ast.Node // = doc
+	// allClosed is true when no unmatched open blocks remain (see closeUnmatchedBlocks).
+	allClosed            bool
+
+	// Attributes are attached to block level elements.
+	attr *ast.Attribute
+
+	// includeStack tracks files being processed — presumably for the Includes extension; confirm in include.go.
+	includeStack *incStack
+}
+
+// New creates a markdown parser with CommonExtensions.
+//
+// You can then call `doc := p.Parse(markdown)` to parse markdown document
+// and `markdown.Render(doc, renderer)` to convert it to another format with
+// a renderer.
+func New() *Parser {
+	return NewWithExtensions(CommonExtensions)
+}
+
+// NewWithExtensions creates a markdown parser with given extensions.
+func NewWithExtensions(extension Extensions) *Parser {
+	p := Parser{
+		refs:         make(map[string]*reference),
+		refsRecord:   make(map[string]struct{}),
+		maxNesting:   16,
+		insideLink:   false,
+		Doc:          &ast.Document{},
+		extensions:   extension,
+		allClosed:    true,
+		includeStack: newIncStack(),
+	}
+	// The document itself is the initial open container.
+	p.tip = p.Doc
+	p.oldTip = p.Doc
+	p.lastMatchedContainer = p.Doc
+
+	// Register inline handlers, keyed by the single byte that can start the
+	// corresponding inline construct. Extension-gated handlers are only
+	// installed when their extension bit is set.
+	p.inlineCallback[' '] = maybeLineBreak
+	p.inlineCallback['*'] = emphasis
+	p.inlineCallback['_'] = emphasis
+	if p.extensions&Strikethrough != 0 {
+		p.inlineCallback['~'] = emphasis
+	}
+	p.inlineCallback['`'] = codeSpan
+	p.inlineCallback['\n'] = lineBreak
+	p.inlineCallback['['] = link
+	p.inlineCallback['<'] = leftAngle
+	p.inlineCallback['\\'] = escape
+	p.inlineCallback['&'] = entity
+	p.inlineCallback['!'] = maybeImage
+	if p.extensions&Mmark != 0 {
+		p.inlineCallback['('] = maybeShortRefOrIndex
+	}
+	p.inlineCallback['^'] = maybeInlineFootnoteOrSuper
+	if p.extensions&Autolink != 0 {
+		// h, m, f (both cases) presumably cover the supported autolink
+		// scheme prefixes (http, mailto, ftp) — see maybeAutoLink.
+		p.inlineCallback['h'] = maybeAutoLink
+		p.inlineCallback['m'] = maybeAutoLink
+		p.inlineCallback['f'] = maybeAutoLink
+		p.inlineCallback['H'] = maybeAutoLink
+		p.inlineCallback['M'] = maybeAutoLink
+		p.inlineCallback['F'] = maybeAutoLink
+	}
+	if p.extensions&MathJax != 0 {
+		p.inlineCallback['$'] = math
+	}
+
+	return &p
+}
+
+// getRef resolves refid to its reference definition. The ReferenceOverride
+// callback, when set, gets the first chance to resolve (or suppress) the id;
+// otherwise the definitions collected from the document are consulted.
+func (p *Parser) getRef(refid string) (ref *reference, found bool) {
+	if p.ReferenceOverride != nil {
+		r, overridden := p.ReferenceOverride(refid)
+		if overridden {
+			if r == nil {
+				// The override explicitly suppressed this reference.
+				return nil, false
+			}
+			return &reference{
+				link:     []byte(r.Link),
+				title:    []byte(r.Title),
+				noteID:   0,
+				hasBlock: false,
+				text:     []byte(r.Text)}, true
+		}
+	}
+	// refs are case insensitive
+	ref, found = p.refs[strings.ToLower(refid)]
+	return ref, found
+}
+
+// isFootnote reports whether ref has been recorded as a footnote. The lookup
+// key is ref.link, which for footnotes holds the footnote id (see isReference).
+func (p *Parser) isFootnote(ref *reference) bool {
+	_, ok := p.refsRecord[string(ref.link)]
+	return ok
+}
+
+// finalize closes the given open block: the tip moves back to its parent.
+func (p *Parser) finalize(block ast.Node) {
+	p.tip = block.GetParent()
+}
+
+// addChild appends node to the deepest open block that may legally contain
+// it (finalizing any blocks that can not), makes it the new tip and returns it.
+func (p *Parser) addChild(node ast.Node) ast.Node {
+	for !canNodeContain(p.tip, node) {
+		p.finalize(p.tip)
+	}
+	ast.AppendChild(p.tip, node)
+	p.tip = node
+	return node
+}
+
+// canNodeContain reports whether node n may hold v as a direct child. It
+// encodes the structural rules of the AST: lists hold only list items,
+// tables hold only header/body/footer sections, those hold only rows, rows
+// hold only cells, and general containers hold anything but a bare list item.
+func canNodeContain(n ast.Node, v ast.Node) bool {
+	switch n.(type) {
+	case *ast.List:
+		return isListItem(v)
+	case *ast.Document, *ast.BlockQuote, *ast.Aside, *ast.ListItem, *ast.CaptionFigure:
+		return !isListItem(v)
+	case *ast.Table:
+		switch v.(type) {
+		case *ast.TableHeader, *ast.TableBody, *ast.TableFooter:
+			return true
+		default:
+			return false
+		}
+	case *ast.TableHeader, *ast.TableBody, *ast.TableFooter:
+		_, ok := v.(*ast.TableRow)
+		return ok
+	case *ast.TableRow:
+		_, ok := v.(*ast.TableCell)
+		return ok
+	}
+	// Every other node type is treated as a leaf for block nesting purposes.
+	return false
+}
+
+// closeUnmatchedBlocks finalizes every open block from the old tip up to
+// (but excluding) the last matched container. It is a no-op when everything
+// is already closed.
+func (p *Parser) closeUnmatchedBlocks() {
+	if p.allClosed {
+		return
+	}
+	for p.oldTip != p.lastMatchedContainer {
+		parent := p.oldTip.GetParent()
+		p.finalize(p.oldTip)
+		p.oldTip = parent
+	}
+	p.allClosed = true
+}
+
+// Reference represents the details of a link.
+// It is the exported counterpart of the internal reference type and is what
+// a ReferenceOverrideFunc returns (see getRef for the conversion).
+// See the documentation in Options for more details on use-case.
+type Reference struct {
+	// Link is usually the URL the reference points to.
+	Link string
+	// Title is the alternate text describing the link in more detail.
+	Title string
+	// Text is the optional text to override the ref with if the syntax used was
+	// [refid][]
+	Text string
+}
+
+// Parse generates AST (abstract syntax tree) representing markdown document.
+//
+// The result is a root of the tree whose underlying type is *ast.Document
+//
+// You can then convert AST to html using html.Renderer, to some other format
+// using a custom renderer or transform the tree.
+func (p *Parser) Parse(input []byte) ast.Node {
+	// First pass: build the block-level structure.
+	p.block(input)
+	// Walk the tree and finish up some of unfinished blocks
+	for p.tip != nil {
+		p.finalize(p.tip)
+	}
+	// Walk the tree again and process inline markdown in each block
+	ast.WalkFunc(p.Doc, func(node ast.Node, entering bool) ast.WalkStatus {
+		switch node.(type) {
+		case *ast.Paragraph, *ast.Heading, *ast.TableCell:
+			p.Inline(node, node.AsContainer().Content)
+			// The raw content has been converted to child nodes; drop it.
+			node.AsContainer().Content = nil
+		}
+		return ast.GoToNext
+	})
+
+	// Append the footnote list unless the caller opted out.
+	if p.Opts.Flags&SkipFootnoteList == 0 {
+		p.parseRefsToAST()
+	}
+	return p.Doc
+}
+
+// parseRefsToAST appends the collected footnotes to the document as an
+// ordered list wrapped in an ast.Footnotes node, then processes the inline
+// markdown inside each footnote body. It is a no-op when the Footnotes
+// extension is off or no footnotes were collected.
+func (p *Parser) parseRefsToAST() {
+	if p.extensions&Footnotes == 0 || len(p.notes) == 0 {
+		return
+	}
+	// Append at the document level.
+	p.tip = p.Doc
+	list := &ast.List{
+		IsFootnotesList: true,
+		ListFlags:       ast.ListTypeOrdered,
+	}
+	p.addBlock(&ast.Footnotes{})
+	block := p.addBlock(list)
+	flags := ast.ListItemBeginningOfList
+	// Note: this loop is intentionally explicit, not range-form. This is
+	// because the body of the loop will append nested footnotes to p.notes and
+	// we need to process those late additions. Range form would only walk over
+	// the fixed initial set.
+	for i := 0; i < len(p.notes); i++ {
+		ref := p.notes[i]
+		p.addChild(ref.footnote)
+		block := ref.footnote
+		listItem := block.(*ast.ListItem)
+		listItem.ListFlags = flags | ast.ListTypeOrdered
+		listItem.RefLink = ref.link
+		if ref.hasBlock {
+			// Multi-line footnote: its title holds raw block content.
+			flags |= ast.ListItemContainsBlock
+			p.block(ref.title)
+		} else {
+			p.Inline(block, ref.title)
+		}
+		flags &^= ast.ListItemBeginningOfList | ast.ListItemContainsBlock
+	}
+	above := list.Parent
+	finalizeList(list)
+	p.tip = above
+
+	// Process inline markdown inside the footnote bodies just added.
+	ast.WalkFunc(block, func(node ast.Node, entering bool) ast.WalkStatus {
+		switch node.(type) {
+		case *ast.Paragraph, *ast.Heading:
+			p.Inline(node, node.AsContainer().Content)
+			node.AsContainer().Content = nil
+		}
+		return ast.GoToNext
+	})
+}
+
+//
+// Link references
+//
+// This section implements support for references that (usually) appear
+// as footnotes in a document, and can be referenced anywhere in the document.
+// The basic format is:
+//
+//    [1]: http://www.google.com/ "Google"
+//    [2]: http://www.github.com/ "Github"
+//
+// Anywhere in the document, the reference can be linked by referring to its
+// label, i.e., 1 and 2 in this example, as in:
+//
+//    This library is hosted on [Github][2], a git hosting site.
+//
+// Actual footnotes as specified in Pandoc and supported by some other Markdown
+// libraries such as php-markdown are also taken care of. They look like this:
+//
+//    This sentence needs a bit of further explanation.[^note]
+//
+//    [^note]: This is the explanation.
+//
+// Footnotes should be placed at the end of the document in an ordered list.
+// Inline footnotes such as:
+//
+//    Inline footnotes^[Not supported.] also exist.
+//
+// are not yet supported.
+
+// reference holds all information necessary for reference-style links or
+// footnotes.
+//
+// Consider this markdown with reference-style links:
+//
+//     [link][ref]
+//
+//     [ref]: /url/ "tooltip title"
+//
+// It will be ultimately converted to this HTML:
+//
+//     <p><a href=\"/url/\" title=\"title\">link</a></p>
+//
+// And a reference structure will be populated as follows:
+//
+//     p.refs["ref"] = &reference{
+//         link: "/url/",
+//         title: "tooltip title",
+//     }
+//
+// Alternatively, reference can contain information about a footnote. Consider
+// this markdown:
+//
+//     Text needing a footnote.[^a]
+//
+//     [^a]: This is the note
+//
+// A reference structure will be populated as follows:
+//
+//     p.refs["a"] = &reference{
+//         link: "a",
+//         title: "This is the note",
+//         noteID: <some positive int>,
+//     }
+//
+// TODO: As you can see, it begs for splitting into two dedicated structures
+// for refs and for footnotes.
+type reference struct {
+	link     []byte // link target; for footnotes this holds the footnote id instead
+	title    []byte // title text; for footnotes this holds the raw note contents
+	noteID   int // 0 if not a footnote ref
+	hasBlock bool // true when a footnote's contents form a block (see scanFootnote)
+	footnote ast.Node // a link to the Item node within a list of footnotes
+
+	text []byte // only gets populated by refOverride feature with Reference.Text
+}
+
+// String returns a human-readable summary of r.
+func (r *reference) String() string {
+	return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}",
+		r.link, r.title, r.text, r.noteID, r.hasBlock)
+}
+
+// Check whether or not data starts with a reference link.
+// If so, it is parsed and stored in the list of references
+// (in the render struct).
+// Returns the number of bytes to skip to move past it,
+// or zero if the first line is not a reference.
+func isReference(p *Parser, data []byte, tabSize int) int {
+	// up to 3 optional leading spaces
+	if len(data) < 4 {
+		return 0
+	}
+	i := 0
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	noteID := 0
+
+	// id part: anything but a newline between brackets
+	if data[i] != '[' {
+		return 0
+	}
+	i++
+	if p.extensions&Footnotes != 0 {
+		// a leading '^' inside the brackets marks a footnote definition
+		if i < len(data) && data[i] == '^' {
+			// we can set it to anything here because the proper noteIds will
+			// be assigned later during the second pass. It just has to be != 0
+			noteID = 1
+			i++
+		}
+	}
+	idOffset := i
+	for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
+		i++
+	}
+	if i >= len(data) || data[i] != ']' {
+		return 0
+	}
+	idEnd := i
+	// footnotes can have empty ID, like this: [^], but a reference can not be
+	// empty like this: []. Break early if it's not a footnote and there's no ID
+	if noteID == 0 && idOffset == idEnd {
+		return 0
+	}
+	// spacer: colon (space | tab)* newline? (space | tab)*
+	i++
+	if i >= len(data) || data[i] != ':' {
+		return 0
+	}
+	i++
+	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
+		i++
+	}
+	if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
+		i++
+		// consume the '\n' of a CRLF pair
+		if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
+			i++
+		}
+	}
+	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
+		i++
+	}
+	if i >= len(data) {
+		return 0
+	}
+
+	var (
+		linkOffset, linkEnd   int
+		titleOffset, titleEnd int
+		lineEnd               int
+		raw                   []byte
+		hasBlock              bool
+	)
+
+	if p.extensions&Footnotes != 0 && noteID != 0 {
+		linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
+		lineEnd = linkEnd
+	} else {
+		linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
+	}
+	// lineEnd == 0 signals that the scan found no valid definition.
+	if lineEnd == 0 {
+		return 0
+	}
+
+	// a valid ref has been found
+
+	ref := &reference{
+		noteID:   noteID,
+		hasBlock: hasBlock,
+	}
+
+	if noteID > 0 {
+		// reusing the link field for the id since footnotes don't have links
+		ref.link = data[idOffset:idEnd]
+		// if footnote, it's not really a title, it's the contained text
+		ref.title = raw
+	} else {
+		ref.link = data[linkOffset:linkEnd]
+		ref.title = data[titleOffset:titleEnd]
+	}
+
+	// id matches are case-insensitive
+	id := string(bytes.ToLower(data[idOffset:idEnd]))
+
+	p.refs[id] = ref
+
+	return lineEnd
+}
+
+// scanLinkRef scans the link target and the optional title of a reference
+// definition starting at data[i]. A returned lineEnd of 0 signals that no
+// valid reference was found (see the caller, isReference).
+func scanLinkRef(p *Parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
+	// link: whitespace-free sequence, optionally between angle brackets
+	if data[i] == '<' {
+		i++
+	}
+	linkOffset = i
+	for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
+		i++
+	}
+	linkEnd = i
+	// strip the surrounding angle brackets, if both are present
+	if linkEnd < len(data) && data[linkOffset] == '<' && data[linkEnd-1] == '>' {
+		linkOffset++
+		linkEnd--
+	}
+
+	// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
+	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
+		i++
+	}
+	if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
+		return
+	}
+
+	// compute end-of-line
+	if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
+		lineEnd = i
+	}
+	if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
+		lineEnd++
+	}
+
+	// optional (space|tab)* spacer after a newline
+	if lineEnd > 0 {
+		i = lineEnd + 1
+		for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
+			i++
+		}
+	}
+
+	// optional title: any non-newline sequence enclosed in '"() alone on its line
+	if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
+		i++
+		titleOffset = i
+
+		// look for EOL
+		for i < len(data) && data[i] != '\n' && data[i] != '\r' {
+			i++
+		}
+		// NOTE(review): this tests for "\n\r" — the reverse of the usual
+		// CRLF order. It matches the historical behavior inherited from
+		// blackfriday; confirm upstream before changing.
+		if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
+			titleEnd = i + 1
+		} else {
+			titleEnd = i
+		}
+
+		// step back past trailing whitespace to find the closing quote/paren
+		i--
+		for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
+			i--
+		}
+		if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
+			lineEnd = titleEnd
+			titleEnd = i
+		}
+	}
+
+	return
+}
+
+// The first bit of this logic is the same as Parser.listItem, but the rest
+// is much simpler. This function simply finds the entire block and shifts it
+// over by one tab if it is indeed a block (just returns the line if it's not).
+// blockEnd is the end of the section in the input buffer, and contents is the
+// extracted text that was shifted over one tab. It will need to be rendered at
+// the end of the document.
+func scanFootnote(p *Parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
+	// i == 0 would make the data[i-1] look-backs below invalid
+	if i == 0 || len(data) == 0 {
+		return
+	}
+
+	// skip leading whitespace on first line
+	for i < len(data) && data[i] == ' ' {
+		i++
+	}
+
+	blockStart = i
+
+	// find the end of the line
+	blockEnd = i
+	for i < len(data) && data[i-1] != '\n' {
+		i++
+	}
+
+	// get working buffer
+	var raw bytes.Buffer
+
+	// put the first line into the working buffer
+	raw.Write(data[blockEnd:i])
+	blockEnd = i
+
+	// process the following lines
+	containsBlankLine := false
+
+gatherLines:
+	for blockEnd < len(data) {
+		i++
+
+		// find the end of this line
+		for i < len(data) && data[i-1] != '\n' {
+			i++
+		}
+
+		// if it is an empty line, guess that it is part of this item
+		// and move on to the next line
+		if p.isEmpty(data[blockEnd:i]) > 0 {
+			containsBlankLine = true
+			blockEnd = i
+			continue
+		}
+
+		n := 0
+		if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
+			// this is the end of the block.
+			// we don't want to include this last line in the index.
+			break gatherLines
+		}
+
+		// if there were blank lines before this one, insert a new one now
+		if containsBlankLine {
+			raw.WriteByte('\n')
+			containsBlankLine = false
+		}
+
+		// get rid of that first tab, write to buffer
+		raw.Write(data[blockEnd+n : i])
+		hasBlock = true
+
+		blockEnd = i
+	}
+
+	// ensure the extracted contents end with a newline
+	if data[blockEnd-1] != '\n' {
+		raw.WriteByte('\n')
+	}
+
+	contents = raw.Bytes()
+
+	return
+}
+
+// isPunctuation returns true if c is an ASCII punctuation symbol.
+func isPunctuation(c byte) bool {
+	for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
+		if c == r {
+			return true
+		}
+	}
+	return false
+}
+
+// isSpace returns true if c is a white-space character
+func isSpace(c byte) bool {
+	return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == '\v'
+}
+
+// isLetter returns true if c is an ASCII letter
+func isLetter(c byte) bool {
+	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
+}
+
+// isAlnum returns true if c is an ASCII digit or letter
+// TODO: check when this is looking for ASCII alnum and when it should use unicode
+func isAlnum(c byte) bool {
+	return (c >= '0' && c <= '9') || isLetter(c)
+}
+
+// TODO: this is not used
+// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
+// always ends output with a newline
+func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
+	// first, check for common cases: no tabs, or only tabs at beginning of line
+	i, prefix := 0, 0
+	slowcase := false
+	for i = 0; i < len(line); i++ {
+		if line[i] == '\t' {
+			if prefix == i {
+				prefix++
+			} else {
+				// a tab after non-tab content forces the rune-counting path
+				slowcase = true
+				break
+			}
+		}
+	}
+
+	// no need to decode runes if all tabs are at the beginning of the line
+	if !slowcase {
+		for i = 0; i < prefix*tabSize; i++ {
+			out.WriteByte(' ')
+		}
+		out.Write(line[prefix:])
+		return
+	}
+
+	// the slow case: we need to count runes to figure out how
+	// many spaces to insert for each tab
+	column := 0
+	i = 0
+	for i < len(line) {
+		start := i
+		// copy the run of non-tab runes, tracking the display column
+		for i < len(line) && line[i] != '\t' {
+			_, size := utf8.DecodeRune(line[i:])
+			i += size
+			column++
+		}
+
+		if i > start {
+			out.Write(line[start:i])
+		}
+
+		if i >= len(line) {
+			break
+		}
+
+		// write at least one space, padding out to the next tab stop
+		for {
+			out.WriteByte(' ')
+			column++
+			if column%tabSize == 0 {
+				break
+			}
+		}
+
+		i++
+	}
+}
+
+// Find if a line counts as indented or not.
+// Returns number of characters the indent is (0 = not indented).
+// A single leading tab counts as an indent of 1 character; otherwise
+// indentSize leading spaces are required.
+func isIndented(data []byte, indentSize int) int {
+	if len(data) == 0 {
+		return 0
+	}
+	if data[0] == '\t' {
+		return 1
+	}
+	if len(data) < indentSize {
+		return 0
+	}
+	for i := 0; i < indentSize; i++ {
+		if data[i] != ' ' {
+			return 0
+		}
+	}
+	return indentSize
+}
+
+// Create a url-safe slug for fragments: alphanumerics are kept, every run
+// of other characters collapses into a single '-', and leading/trailing
+// dashes are trimmed.
+func slugify(in []byte) []byte {
+	if len(in) == 0 {
+		return in
+	}
+	out := make([]byte, 0, len(in))
+	// sym is true while the previous output byte is '-', collapsing runs
+	// of non-alphanumeric input into one dash.
+	sym := false
+
+	for _, ch := range in {
+		if isAlnum(ch) {
+			sym = false
+			out = append(out, ch)
+		} else if sym {
+			continue
+		} else {
+			out = append(out, '-')
+			sym = true
+		}
+	}
+	// trim dashes from both ends
+	var a, b int
+	var ch byte
+	for a, ch = range out {
+		if ch != '-' {
+			break
+		}
+	}
+	for b = len(out) - 1; b > 0; b-- {
+		if out[b] != '-' {
+			break
+		}
+	}
+	return out[a : b+1]
+}
+
+// isListItem reports whether d is an *ast.ListItem.
+func isListItem(d ast.Node) bool {
+	_, ok := d.(*ast.ListItem)
+	return ok
+}
diff --git a/vendor/github.com/gomarkdown/markdown/parser/ref.go b/vendor/github.com/gomarkdown/markdown/parser/ref.go
new file mode 100644
index 00000000..0b59a196
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/parser/ref.go
@@ -0,0 +1,89 @@
+package parser
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/gomarkdown/markdown/ast"
+)
+
+// parse '(#r)', where r does not contain spaces. Or.
+// (!item) (!item, subitem), for an index, (!!item) signals primary.
+// Registered as the inline handler for '(' when the Mmark extension is on
+// (see NewWithExtensions); returns (0, nil) when the input does not match.
+func maybeShortRefOrIndex(p *Parser, data []byte, offset int) (int, ast.Node) {
+	if len(data[offset:]) < 4 {
+		return 0, nil
+	}
+	// short ref first
+	data = data[offset:]
+	i := 1
+	switch data[i] {
+	case '#': // cross ref
+		i++
+	Loop:
+		// ids may contain alphanumerics plus '_', '-' and ':'
+		for i < len(data) {
+			c := data[i]
+			switch {
+			case c == ')':
+				break Loop
+			case !isAlnum(c):
+				if c == '_' || c == '-' || c == ':' {
+					i++
+					continue
+				}
+				i = 0
+				break Loop
+			}
+			i++
+		}
+		if i >= len(data) {
+			return 0, nil
+		}
+		if data[i] != ')' {
+			return 0, nil
+		}
+
+		id := data[2:i]
+		node := &ast.CrossReference{}
+		node.Destination = id
+
+		return i + 1, node
+
+	case '!': // index
+		i++
+		start := i
+		i = skipUntilChar(data, start, ')')
+
+		// did we reach the end of the buffer without a closing marker?
+		if i >= len(data) {
+			return 0, nil
+		}
+
+		// an empty index, (!), is not valid
+		if len(data[start:i]) < 1 {
+			return 0, nil
+		}
+
+		idx := &ast.Index{}
+
+		idx.ID = fmt.Sprintf("idxref:%d", p.indexCnt)
+		p.indexCnt++
+
+		// a second '!' marks a primary index entry
+		idx.Primary = data[start] == '!'
+		buf := data[start:i]
+
+		if idx.Primary {
+			buf = buf[1:]
+		}
+		items := bytes.Split(buf, []byte(","))
+		switch len(items) {
+		case 1:
+			idx.Item = bytes.TrimSpace(items[0])
+			return i + 1, idx
+		case 2:
+			idx.Item = bytes.TrimSpace(items[0])
+			idx.Subitem = bytes.TrimSpace(items[1])
+			return i + 1, idx
+		}
+		// three or more comma-separated parts: not a valid index, fall through
+	}
+
+	return 0, nil
+}
-- 
cgit v1.2.3