Diffstat (limited to 'vendor/golang.org/x/text/encoding/unicode')
3 files changed, 0 insertions, 812 deletions
diff --git a/vendor/golang.org/x/text/encoding/unicode/override.go b/vendor/golang.org/x/text/encoding/unicode/override.go
deleted file mode 100644
index 35d62fcc..00000000
--- a/vendor/golang.org/x/text/encoding/unicode/override.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package unicode
-
-import (
-	"golang.org/x/text/transform"
-)
-
-// BOMOverride returns a new decoder transformer that is identical to fallback,
-// except that the presence of a Byte Order Mark at the start of the input
-// causes it to switch to the corresponding Unicode decoding. It will only
-// consider BOMs for UTF-8, UTF-16BE, and UTF-16LE.
-//
-// This differs from using ExpectBOM by allowing a BOM to switch to UTF-8, not
-// just UTF-16 variants, and allowing falling back to any encoding scheme.
-//
-// This technique is recommended by the W3C for use in HTML 5: "For
-// compatibility with deployed content, the byte order mark (also known as BOM)
-// is considered more authoritative than anything else."
-// http://www.w3.org/TR/encoding/#specification-hooks
-//
-// Using BOMOverride is mostly intended for use cases where the first characters
-// of a fallback encoding are known to not be a BOM, for example, for valid HTML
-// and most encodings.
-func BOMOverride(fallback transform.Transformer) transform.Transformer {
-	// TODO: possibly allow a variadic argument of unicode encodings to allow
-	// specifying details of which fallbacks are supported as well as
-	// specifying the details of the implementations. This would also allow for
-	// support for UTF-32, which should not be supported by default.
-	return &bomOverride{fallback: fallback}
-}
-
-type bomOverride struct {
-	fallback transform.Transformer
-	current  transform.Transformer
-}
-
-func (d *bomOverride) Reset() {
-	d.current = nil
-	d.fallback.Reset()
-}
-
-var (
-	// TODO: we could use decode functions here, instead of allocating a new
-	// decoder on every NewDecoder as IgnoreBOM decoders can be stateless.
-	utf16le = UTF16(LittleEndian, IgnoreBOM)
-	utf16be = UTF16(BigEndian, IgnoreBOM)
-)
-
-const utf8BOM = "\ufeff"
-
-func (d *bomOverride) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
-	if d.current != nil {
-		return d.current.Transform(dst, src, atEOF)
-	}
-	if len(src) < 3 && !atEOF {
-		return 0, 0, transform.ErrShortSrc
-	}
-	d.current = d.fallback
-	bomSize := 0
-	if len(src) >= 2 {
-		if src[0] == 0xFF && src[1] == 0xFE {
-			d.current = utf16le.NewDecoder()
-			bomSize = 2
-		} else if src[0] == 0xFE && src[1] == 0xFF {
-			d.current = utf16be.NewDecoder()
-			bomSize = 2
-		} else if len(src) >= 3 &&
-			src[0] == utf8BOM[0] &&
-			src[1] == utf8BOM[1] &&
-			src[2] == utf8BOM[2] {
-			d.current = transform.Nop
-			bomSize = 3
-		}
-	}
-	if bomSize < len(src) {
-		nDst, nSrc, err = d.current.Transform(dst, src[bomSize:], atEOF)
-	}
-	return nDst, nSrc + bomSize, err
-}
diff --git a/vendor/golang.org/x/text/encoding/unicode/unicode.go b/vendor/golang.org/x/text/encoding/unicode/unicode.go
deleted file mode 100644
index 579cadfb..00000000
--- a/vendor/golang.org/x/text/encoding/unicode/unicode.go
+++ /dev/null
@@ -1,434 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package unicode provides Unicode encodings such as UTF-16.
-package unicode // import "golang.org/x/text/encoding/unicode"
-
-import (
-	"errors"
-	"unicode/utf16"
-	"unicode/utf8"
-
-	"golang.org/x/text/encoding"
-	"golang.org/x/text/encoding/internal"
-	"golang.org/x/text/encoding/internal/identifier"
-	"golang.org/x/text/internal/utf8internal"
-	"golang.org/x/text/runes"
-	"golang.org/x/text/transform"
-)
-
-// TODO: I think the Transformers really should return errors on unmatched
-// surrogate pairs and odd numbers of bytes. This is not required by RFC 2781,
-// which leaves it open, but is suggested by WhatWG. It will allow for all error
-// modes as defined by WhatWG: fatal, HTML and Replacement. This would require
-// the introduction of some kind of error type for conveying the erroneous code
-// point.
-
-// UTF8 is the UTF-8 encoding.
-var UTF8 encoding.Encoding = utf8enc
-
-var utf8enc = &internal.Encoding{
-	&internal.SimpleEncoding{utf8Decoder{}, runes.ReplaceIllFormed()},
-	"UTF-8",
-	identifier.UTF8,
-}
-
-type utf8Decoder struct{ transform.NopResetter }
-
-func (utf8Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
-	var pSrc int // point from which to start copy in src
-	var accept utf8internal.AcceptRange
-
-	// The decoder can only make the input larger, not smaller.
-	n := len(src)
-	if len(dst) < n {
-		err = transform.ErrShortDst
-		n = len(dst)
-		atEOF = false
-	}
-	for nSrc < n {
-		c := src[nSrc]
-		if c < utf8.RuneSelf {
-			nSrc++
-			continue
-		}
-		first := utf8internal.First[c]
-		size := int(first & utf8internal.SizeMask)
-		if first == utf8internal.FirstInvalid {
-			goto handleInvalid // invalid starter byte
-		}
-		accept = utf8internal.AcceptRanges[first>>utf8internal.AcceptShift]
-		if nSrc+size > n {
-			if !atEOF {
-				// We may stop earlier than necessary here if the short sequence
-				// has invalid bytes. Not checking for this simplifies the code
-				// and may avoid duplicate computations in certain conditions.
-				if err == nil {
-					err = transform.ErrShortSrc
-				}
-				break
-			}
-			// Determine the maximal subpart of an ill-formed subsequence.
-			switch {
-			case nSrc+1 >= n || src[nSrc+1] < accept.Lo || accept.Hi < src[nSrc+1]:
-				size = 1
-			case nSrc+2 >= n || src[nSrc+2] < utf8internal.LoCB || utf8internal.HiCB < src[nSrc+2]:
-				size = 2
-			default:
-				size = 3 // As we are short, the maximum is 3.
-			}
-			goto handleInvalid
-		}
-		if c = src[nSrc+1]; c < accept.Lo || accept.Hi < c {
-			size = 1
-			goto handleInvalid // invalid continuation byte
-		} else if size == 2 {
-		} else if c = src[nSrc+2]; c < utf8internal.LoCB || utf8internal.HiCB < c {
-			size = 2
-			goto handleInvalid // invalid continuation byte
-		} else if size == 3 {
-		} else if c = src[nSrc+3]; c < utf8internal.LoCB || utf8internal.HiCB < c {
-			size = 3
-			goto handleInvalid // invalid continuation byte
-		}
-		nSrc += size
-		continue
-
-	handleInvalid:
-		// Copy the scanned input so far.
-		nDst += copy(dst[nDst:], src[pSrc:nSrc])
-
-		// Append RuneError to the destination.
-		const runeError = "\ufffd"
-		if nDst+len(runeError) > len(dst) {
-			return nDst, nSrc, transform.ErrShortDst
-		}
-		nDst += copy(dst[nDst:], runeError)
-
-		// Skip the maximal subpart of an ill-formed subsequence according to
-		// the W3C standard way instead of the Go way. This Transform is
-		// probably the only place in the text repo where it is warranted.
-		nSrc += size
-		pSrc = nSrc
-
-		// Recompute the maximum source length.
-		if sz := len(dst) - nDst; sz < len(src)-nSrc {
-			err = transform.ErrShortDst
-			n = nSrc + sz
-			atEOF = false
-		}
-	}
-	return nDst + copy(dst[nDst:], src[pSrc:nSrc]), nSrc, err
-}
-
-// UTF16 returns a UTF-16 Encoding for the given default endianness and byte
-// order mark (BOM) policy.
-//
-// When decoding from UTF-16 to UTF-8, if the BOMPolicy is IgnoreBOM then
-// neither BOMs U+FEFF nor noncharacters U+FFFE in the input stream will affect
-// the endianness used for decoding, and will instead be output as their
-// standard UTF-8 encodings: "\xef\xbb\xbf" and "\xef\xbf\xbe". If the BOMPolicy
-// is UseBOM or ExpectBOM a staring BOM is not written to the UTF-8 output.
-// Instead, it overrides the default endianness e for the remainder of the
-// transformation. Any subsequent BOMs U+FEFF or noncharacters U+FFFE will not
-// affect the endianness used, and will instead be output as their standard
-// UTF-8 encodings. For UseBOM, if there is no starting BOM, it will proceed
-// with the default Endianness. For ExpectBOM, in that case, the transformation
-// will return early with an ErrMissingBOM error.
-//
-// When encoding from UTF-8 to UTF-16, a BOM will be inserted at the start of
-// the output if the BOMPolicy is UseBOM or ExpectBOM. Otherwise, a BOM will not
-// be inserted. The UTF-8 input does not need to contain a BOM.
-//
-// There is no concept of a 'native' endianness. If the UTF-16 data is produced
-// and consumed in a greater context that implies a certain endianness, use
-// IgnoreBOM. Otherwise, use ExpectBOM and always produce and consume a BOM.
-//
-// In the language of http://www.unicode.org/faq/utf_bom.html#bom10, IgnoreBOM
-// corresponds to "Where the precise type of the data stream is known... the
-// BOM should not be used" and ExpectBOM corresponds to "A particular
-// protocol... may require use of the BOM".
-func UTF16(e Endianness, b BOMPolicy) encoding.Encoding {
-	return utf16Encoding{config{e, b}, mibValue[e][b&bomMask]}
-}
-
-// mibValue maps Endianness and BOMPolicy settings to MIB constants. Note that
-// some configurations map to the same MIB identifier. RFC 2781 has requirements
-// and recommendations. Some of the "configurations" are merely recommendations,
-// so multiple configurations could match.
-var mibValue = map[Endianness][numBOMValues]identifier.MIB{
-	BigEndian: [numBOMValues]identifier.MIB{
-		IgnoreBOM: identifier.UTF16BE,
-		UseBOM:    identifier.UTF16, // BigEnding default is preferred by RFC 2781.
-		// TODO: acceptBOM | strictBOM would map to UTF16BE as well.
-	},
-	LittleEndian: [numBOMValues]identifier.MIB{
-		IgnoreBOM: identifier.UTF16LE,
-		UseBOM:    identifier.UTF16, // LittleEndian default is allowed and preferred on Windows.
-		// TODO: acceptBOM | strictBOM would map to UTF16LE as well.
-	},
-	// ExpectBOM is not widely used and has no valid MIB identifier.
-}
-
-// All lists a configuration for each IANA-defined UTF-16 variant.
-var All = []encoding.Encoding{
-	UTF8,
-	UTF16(BigEndian, UseBOM),
-	UTF16(BigEndian, IgnoreBOM),
-	UTF16(LittleEndian, IgnoreBOM),
-}
-
-// BOMPolicy is a UTF-16 encoding's byte order mark policy.
-type BOMPolicy uint8
-
-const (
-	writeBOM   BOMPolicy = 0x01
-	acceptBOM  BOMPolicy = 0x02
-	requireBOM BOMPolicy = 0x04
-	bomMask    BOMPolicy = 0x07
-
-	// HACK: numBOMValues == 8 triggers a bug in the 1.4 compiler (cannot have a
-	// map of an array of length 8 of a type that is also used as a key or value
-	// in another map). See golang.org/issue/11354.
-	// TODO: consider changing this value back to 8 if the use of 1.4.* has
-	// been minimized.
-	numBOMValues = 8 + 1
-
-	// IgnoreBOM means to ignore any byte order marks.
-	IgnoreBOM BOMPolicy = 0
-	// Common and RFC 2781-compliant interpretation for UTF-16BE/LE.
-
-	// UseBOM means that the UTF-16 form may start with a byte order mark, which
-	// will be used to override the default encoding.
-	UseBOM BOMPolicy = writeBOM | acceptBOM
-	// Common and RFC 2781-compliant interpretation for UTF-16.
-
-	// ExpectBOM means that the UTF-16 form must start with a byte order mark,
-	// which will be used to override the default encoding.
-	ExpectBOM BOMPolicy = writeBOM | acceptBOM | requireBOM
-	// Used in Java as Unicode (not to be confused with Java's UTF-16) and
-	// ICU's UTF-16,version=1. Not compliant with RFC 2781.
-
-	// TODO (maybe): strictBOM: BOM must match Endianness. This would allow:
-	// - UTF-16(B|L)E,version=1: writeBOM | acceptBOM | requireBOM | strictBOM
-	//    (UnicodeBig and UnicodeLittle in Java)
-	// - RFC 2781-compliant, but less common interpretation for UTF-16(B|L)E:
-	//    acceptBOM | strictBOM (e.g. assigned to CheckBOM).
-	// This addition would be consistent with supporting ExpectBOM.
-)
-
-// Endianness is a UTF-16 encoding's default endianness.
-type Endianness bool
-
-const (
-	// BigEndian is UTF-16BE.
-	BigEndian Endianness = false
-	// LittleEndian is UTF-16LE.
-	LittleEndian Endianness = true
-)
-
-// ErrMissingBOM means that decoding UTF-16 input with ExpectBOM did not find a
-// starting byte order mark.
-var ErrMissingBOM = errors.New("encoding: missing byte order mark")
-
-type utf16Encoding struct {
-	config
-	mib identifier.MIB
-}
-
-type config struct {
-	endianness Endianness
-	bomPolicy  BOMPolicy
-}
-
-func (u utf16Encoding) NewDecoder() *encoding.Decoder {
-	return &encoding.Decoder{Transformer: &utf16Decoder{
-		initial: u.config,
-		current: u.config,
-	}}
-}
-
-func (u utf16Encoding) NewEncoder() *encoding.Encoder {
-	return &encoding.Encoder{Transformer: &utf16Encoder{
-		endianness:       u.endianness,
-		initialBOMPolicy: u.bomPolicy,
-		currentBOMPolicy: u.bomPolicy,
-	}}
-}
-
-func (u utf16Encoding) ID() (mib identifier.MIB, other string) {
-	return u.mib, ""
-}
-
-func (u utf16Encoding) String() string {
-	e, b := "B", ""
-	if u.endianness == LittleEndian {
-		e = "L"
-	}
-	switch u.bomPolicy {
-	case ExpectBOM:
-		b = "Expect"
-	case UseBOM:
-		b = "Use"
-	case IgnoreBOM:
-		b = "Ignore"
-	}
-	return "UTF-16" + e + "E (" + b + " BOM)"
-}
-
-type utf16Decoder struct {
-	initial config
-	current config
-}
-
-func (u *utf16Decoder) Reset() {
-	u.current = u.initial
-}
-
-func (u *utf16Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
-	if len(src) == 0 {
-		if atEOF && u.current.bomPolicy&requireBOM != 0 {
-			return 0, 0, ErrMissingBOM
-		}
-		return 0, 0, nil
-	}
-	if u.current.bomPolicy&acceptBOM != 0 {
-		if len(src) < 2 {
-			return 0, 0, transform.ErrShortSrc
-		}
-		switch {
-		case src[0] == 0xfe && src[1] == 0xff:
-			u.current.endianness = BigEndian
-			nSrc = 2
-		case src[0] == 0xff && src[1] == 0xfe:
-			u.current.endianness = LittleEndian
-			nSrc = 2
-		default:
-			if u.current.bomPolicy&requireBOM != 0 {
-				return 0, 0, ErrMissingBOM
-			}
-		}
-		u.current.bomPolicy = IgnoreBOM
-	}
-
-	var r rune
-	var dSize, sSize int
-	for nSrc < len(src) {
-		if nSrc+1 < len(src) {
-			x := uint16(src[nSrc+0])<<8 | uint16(src[nSrc+1])
-			if u.current.endianness == LittleEndian {
-				x = x>>8 | x<<8
-			}
-			r, sSize = rune(x), 2
-			if utf16.IsSurrogate(r) {
-				if nSrc+3 < len(src) {
-					x = uint16(src[nSrc+2])<<8 | uint16(src[nSrc+3])
-					if u.current.endianness == LittleEndian {
-						x = x>>8 | x<<8
-					}
-					// Save for next iteration if it is not a high surrogate.
-					if isHighSurrogate(rune(x)) {
-						r, sSize = utf16.DecodeRune(r, rune(x)), 4
-					}
-				} else if !atEOF {
-					err = transform.ErrShortSrc
-					break
-				}
-			}
-			if dSize = utf8.RuneLen(r); dSize < 0 {
-				r, dSize = utf8.RuneError, 3
-			}
-		} else if atEOF {
-			// Single trailing byte.
-			r, dSize, sSize = utf8.RuneError, 3, 1
-		} else {
-			err = transform.ErrShortSrc
-			break
-		}
-		if nDst+dSize > len(dst) {
-			err = transform.ErrShortDst
-			break
-		}
-		nDst += utf8.EncodeRune(dst[nDst:], r)
-		nSrc += sSize
-	}
-	return nDst, nSrc, err
-}
-
-func isHighSurrogate(r rune) bool {
-	return 0xDC00 <= r && r <= 0xDFFF
-}
-
-type utf16Encoder struct {
-	endianness       Endianness
-	initialBOMPolicy BOMPolicy
-	currentBOMPolicy BOMPolicy
-}
-
-func (u *utf16Encoder) Reset() {
-	u.currentBOMPolicy = u.initialBOMPolicy
-}
-
-func (u *utf16Encoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
-	if u.currentBOMPolicy&writeBOM != 0 {
-		if len(dst) < 2 {
-			return 0, 0, transform.ErrShortDst
-		}
-		dst[0], dst[1] = 0xfe, 0xff
-		u.currentBOMPolicy = IgnoreBOM
-		nDst = 2
-	}
-
-	r, size := rune(0), 0
-	for nSrc < len(src) {
-		r = rune(src[nSrc])
-
-		// Decode a 1-byte rune.
-		if r < utf8.RuneSelf {
-			size = 1
-
-		} else {
-			// Decode a multi-byte rune.
-			r, size = utf8.DecodeRune(src[nSrc:])
-			if size == 1 {
-				// All valid runes of size 1 (those below utf8.RuneSelf) were
-				// handled above. We have invalid UTF-8 or we haven't seen the
-				// full character yet.
-				if !atEOF && !utf8.FullRune(src[nSrc:]) {
-					err = transform.ErrShortSrc
-					break
-				}
-			}
-		}
-
-		if r <= 0xffff {
-			if nDst+2 > len(dst) {
-				err = transform.ErrShortDst
-				break
-			}
-			dst[nDst+0] = uint8(r >> 8)
-			dst[nDst+1] = uint8(r)
-			nDst += 2
-		} else {
-			if nDst+4 > len(dst) {
-				err = transform.ErrShortDst
-				break
-			}
-			r1, r2 := utf16.EncodeRune(r)
-			dst[nDst+0] = uint8(r1 >> 8)
-			dst[nDst+1] = uint8(r1)
-			dst[nDst+2] = uint8(r2 >> 8)
-			dst[nDst+3] = uint8(r2)
-			nDst += 4
-		}
-		nSrc += size
-	}
-
-	if u.endianness == LittleEndian {
-		for i := 0; i < nDst; i += 2 {
-			dst[i], dst[i+1] = dst[i+1], dst[i]
-		}
-	}
-	return nDst, nSrc, err
-}
diff --git a/vendor/golang.org/x/text/encoding/unicode/utf32/utf32.go b/vendor/golang.org/x/text/encoding/unicode/utf32/utf32.go
deleted file mode 100644
index 48b21521..00000000
--- a/vendor/golang.org/x/text/encoding/unicode/utf32/utf32.go
+++ /dev/null
@@ -1,296 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package utf32 provides the UTF-32 Unicode encoding.
-//
-// Please note that support for UTF-32 is discouraged as it is a rare and
-// inefficient encoding, unfit for use as an interchange format. For use
-// on the web, the W3C strongly discourages its use
-// (https://www.w3.org/TR/html5/document-metadata.html#charset)
-// while WHATWG directly prohibits supporting it
-// (https://html.spec.whatwg.org/multipage/syntax.html#character-encodings).
-package utf32 // import "golang.org/x/text/encoding/unicode/utf32"
-
-import (
-	"errors"
-	"unicode/utf8"
-
-	"golang.org/x/text/encoding"
-	"golang.org/x/text/encoding/internal/identifier"
-	"golang.org/x/text/transform"
-)
-
-// All lists a configuration for each IANA-defined UTF-32 variant.
-var All = []encoding.Encoding{
-	UTF32(BigEndian, UseBOM),
-	UTF32(BigEndian, IgnoreBOM),
-	UTF32(LittleEndian, IgnoreBOM),
-}
-
-// ErrMissingBOM means that decoding UTF-32 input with ExpectBOM did not
-// find a starting byte order mark.
-var ErrMissingBOM = errors.New("encoding: missing byte order mark")
-
-// UTF32 returns a UTF-32 Encoding for the given default endianness and
-// byte order mark (BOM) policy.
-//
-// When decoding from UTF-32 to UTF-8, if the BOMPolicy is IgnoreBOM then
-// neither BOMs U+FEFF nor ill-formed code units 0xFFFE0000 in the input
-// stream will affect the endianness used for decoding. Instead BOMs will
-// be output as their standard UTF-8 encoding "\xef\xbb\xbf" while
-// 0xFFFE0000 code units will be output as "\xef\xbf\xbd", the standard
-// UTF-8 encoding for the Unicode replacement character. If the BOMPolicy
-// is UseBOM or ExpectBOM a starting BOM is not written to the UTF-8
-// output. Instead, it overrides the default endianness e for the remainder
-// of the transformation. Any subsequent BOMs U+FEFF or ill-formed code
-// units 0xFFFE0000 will not affect the endianness used, and will instead
-// be output as their standard UTF-8 (replacement) encodings. For UseBOM,
-// if there is no starting BOM, it will proceed with the default
-// Endianness. For ExpectBOM, in that case, the transformation will return
-// early with an ErrMissingBOM error.
-//
-// When encoding from UTF-8 to UTF-32, a BOM will be inserted at the start
-// of the output if the BOMPolicy is UseBOM or ExpectBOM. Otherwise, a BOM
-// will not be inserted. The UTF-8 input does not need to contain a BOM.
-//
-// There is no concept of a 'native' endianness. If the UTF-32 data is
-// produced and consumed in a greater context that implies a certain
-// endianness, use IgnoreBOM. Otherwise, use ExpectBOM and always produce
-// and consume a BOM.
-//
-// In the language of http://www.unicode.org/faq/utf_bom.html#bom10,
-// IgnoreBOM corresponds to "Where the precise type of the data stream is
-// known... the BOM should not be used" and ExpectBOM corresponds to "A
-// particular protocol... may require use of the BOM".
-func UTF32(e Endianness, b BOMPolicy) encoding.Encoding {
-	return utf32Encoding{config{e, b}, mibValue[e][b&bomMask]}
-}
-
-// mibValue maps Endianness and BOMPolicy settings to MIB constants for UTF-32.
-// Note that some configurations map to the same MIB identifier.
-var mibValue = map[Endianness][numBOMValues]identifier.MIB{
-	BigEndian: [numBOMValues]identifier.MIB{
-		IgnoreBOM: identifier.UTF32BE,
-		UseBOM:    identifier.UTF32,
-	},
-	LittleEndian: [numBOMValues]identifier.MIB{
-		IgnoreBOM: identifier.UTF32LE,
-		UseBOM:    identifier.UTF32,
-	},
-	// ExpectBOM is not widely used and has no valid MIB identifier.
-}
-
-// BOMPolicy is a UTF-32 encodings's byte order mark policy.
-type BOMPolicy uint8
-
-const (
-	writeBOM   BOMPolicy = 0x01
-	acceptBOM  BOMPolicy = 0x02
-	requireBOM BOMPolicy = 0x04
-	bomMask    BOMPolicy = 0x07
-
-	// HACK: numBOMValues == 8 triggers a bug in the 1.4 compiler (cannot have a
-	// map of an array of length 8 of a type that is also used as a key or value
-	// in another map). See golang.org/issue/11354.
-	// TODO: consider changing this value back to 8 if the use of 1.4.* has
-	// been minimized.
-	numBOMValues = 8 + 1
-
-	// IgnoreBOM means to ignore any byte order marks.
-	IgnoreBOM BOMPolicy = 0
-	// Unicode-compliant interpretation for UTF-32BE/LE.
-
-	// UseBOM means that the UTF-32 form may start with a byte order mark,
-	// which will be used to override the default encoding.
-	UseBOM BOMPolicy = writeBOM | acceptBOM
-	// Unicode-compliant interpretation for UTF-32.
-
-	// ExpectBOM means that the UTF-32 form must start with a byte order mark,
-	// which will be used to override the default encoding.
-	ExpectBOM BOMPolicy = writeBOM | acceptBOM | requireBOM
-	// Consistent with BOMPolicy definition in golang.org/x/text/encoding/unicode
-)
-
-// Endianness is a UTF-32 encoding's default endianness.
-type Endianness bool
-
-const (
-	// BigEndian is UTF-32BE.
-	BigEndian Endianness = false
-	// LittleEndian is UTF-32LE.
-	LittleEndian Endianness = true
-)
-
-type config struct {
-	endianness Endianness
-	bomPolicy  BOMPolicy
-}
-
-type utf32Encoding struct {
-	config
-	mib identifier.MIB
-}
-
-func (u utf32Encoding) NewDecoder() *encoding.Decoder {
-	return &encoding.Decoder{Transformer: &utf32Decoder{
-		initial: u.config,
-		current: u.config,
-	}}
-}
-
-func (u utf32Encoding) NewEncoder() *encoding.Encoder {
-	return &encoding.Encoder{Transformer: &utf32Encoder{
-		endianness:       u.endianness,
-		initialBOMPolicy: u.bomPolicy,
-		currentBOMPolicy: u.bomPolicy,
-	}}
-}
-
-func (u utf32Encoding) ID() (mib identifier.MIB, other string) {
-	return u.mib, ""
-}
-
-func (u utf32Encoding) String() string {
-	e, b := "B", ""
-	if u.endianness == LittleEndian {
-		e = "L"
-	}
-	switch u.bomPolicy {
-	case ExpectBOM:
-		b = "Expect"
-	case UseBOM:
-		b = "Use"
-	case IgnoreBOM:
-		b = "Ignore"
-	}
-	return "UTF-32" + e + "E (" + b + " BOM)"
-}
-
-type utf32Decoder struct {
-	initial config
-	current config
-}
-
-func (u *utf32Decoder) Reset() {
-	u.current = u.initial
-}
-
-func (u *utf32Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
-	if len(src) == 0 {
-		if atEOF && u.current.bomPolicy&requireBOM != 0 {
-			return 0, 0, ErrMissingBOM
-		}
-		return 0, 0, nil
-	}
-	if u.current.bomPolicy&acceptBOM != 0 {
-		if len(src) < 4 {
-			return 0, 0, transform.ErrShortSrc
-		}
-		switch {
-		case src[0] == 0x00 && src[1] == 0x00 && src[2] == 0xfe && src[3] == 0xff:
-			u.current.endianness = BigEndian
-			nSrc = 4
-		case src[0] == 0xff && src[1] == 0xfe && src[2] == 0x00 && src[3] == 0x00:
-			u.current.endianness = LittleEndian
-			nSrc = 4
-		default:
-			if u.current.bomPolicy&requireBOM != 0 {
-				return 0, 0, ErrMissingBOM
-			}
-		}
-		u.current.bomPolicy = IgnoreBOM
-	}
-
-	var r rune
-	var dSize, sSize int
-	for nSrc < len(src) {
-		if nSrc+3 < len(src) {
-			x := uint32(src[nSrc+0])<<24 | uint32(src[nSrc+1])<<16 |
-				uint32(src[nSrc+2])<<8 | uint32(src[nSrc+3])
-			if u.current.endianness == LittleEndian {
-				x = x>>24 | (x >> 8 & 0x0000FF00) | (x << 8 & 0x00FF0000) | x<<24
-			}
-			r, sSize = rune(x), 4
-			if dSize = utf8.RuneLen(r); dSize < 0 {
-				r, dSize = utf8.RuneError, 3
-			}
-		} else if atEOF {
-			// 1..3 trailing bytes.
-			r, dSize, sSize = utf8.RuneError, 3, len(src)-nSrc
-		} else {
-			err = transform.ErrShortSrc
-			break
-		}
-		if nDst+dSize > len(dst) {
-			err = transform.ErrShortDst
-			break
-		}
-		nDst += utf8.EncodeRune(dst[nDst:], r)
-		nSrc += sSize
-	}
-	return nDst, nSrc, err
-}
-
-type utf32Encoder struct {
-	endianness       Endianness
-	initialBOMPolicy BOMPolicy
-	currentBOMPolicy BOMPolicy
-}
-
-func (u *utf32Encoder) Reset() {
-	u.currentBOMPolicy = u.initialBOMPolicy
-}
-
-func (u *utf32Encoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
-	if u.currentBOMPolicy&writeBOM != 0 {
-		if len(dst) < 4 {
-			return 0, 0, transform.ErrShortDst
-		}
-		dst[0], dst[1], dst[2], dst[3] = 0x00, 0x00, 0xfe, 0xff
-		u.currentBOMPolicy = IgnoreBOM
-		nDst = 4
-	}
-
-	r, size := rune(0), 0
-	for nSrc < len(src) {
-		r = rune(src[nSrc])
-
-		// Decode a 1-byte rune.
-		if r < utf8.RuneSelf {
-			size = 1
-
-		} else {
-			// Decode a multi-byte rune.
-			r, size = utf8.DecodeRune(src[nSrc:])
-			if size == 1 {
-				// All valid runes of size 1 (those below utf8.RuneSelf) were
-				// handled above. We have invalid UTF-8 or we haven't seen the
-				// full character yet.
-				if !atEOF && !utf8.FullRune(src[nSrc:]) {
-					err = transform.ErrShortSrc
-					break
-				}
-			}
-		}
-
-		if nDst+4 > len(dst) {
-			err = transform.ErrShortDst
-			break
-		}
-
-		dst[nDst+0] = uint8(r >> 24)
-		dst[nDst+1] = uint8(r >> 16)
-		dst[nDst+2] = uint8(r >> 8)
-		dst[nDst+3] = uint8(r)
-		nDst += 4
-		nSrc += size
-	}
-
-	if u.endianness == LittleEndian {
-		for i := 0; i < nDst; i += 4 {
-			dst[i], dst[i+1], dst[i+2], dst[i+3] = dst[i+3], dst[i+2], dst[i+1], dst[i]
-		}
-	}
-	return nDst, nSrc, err
-}
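
Note for reviewers: since this change removes the vendored copy of golang.org/x/text/encoding/unicode wholesale, here is a minimal sketch of how downstream code typically consumed the removed API, useful for checking that nothing in-tree still depends on it. It is illustrative only; the sample input bytes and the assumption that the upstream module is still fetched via Go modules (rather than vendor/) are mine, not part of this change.

package main

import (
	"bytes"
	"fmt"
	"io"

	"golang.org/x/text/encoding/unicode"
	"golang.org/x/text/transform"
)

func main() {
	// "hi" encoded as UTF-16BE, preceded by a big-endian BOM (FE FF).
	input := []byte{0xFE, 0xFF, 0x00, 'h', 0x00, 'i'}

	// Decode UTF-16 to UTF-8; UseBOM lets a leading BOM override the
	// default endianness and strips it from the output.
	dec := unicode.UTF16(unicode.BigEndian, unicode.UseBOM).NewDecoder()
	out, err := dec.Bytes(input)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", out) // "hi"

	// BOMOverride: treat the stream as UTF-8 unless it starts with a
	// UTF-8, UTF-16BE, or UTF-16LE byte order mark.
	r := transform.NewReader(bytes.NewReader(input),
		unicode.BOMOverride(unicode.UTF8.NewDecoder()))
	out, err = io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", out) // "hi"
}

The utf32 subpackage removed above offered the analogous UTF32(endianness, bomPolicy) constructor, though its own package comment discourages UTF-32 as an interchange format.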